diff --git a/ap_ctl.py b/ap_ctl.py index 0a67ea04..c562b302 100755 --- a/ap_ctl.py +++ b/ap_ctl.py @@ -79,7 +79,7 @@ def usage(): print("-s|--scheme (serial|telnet|ssh): connect to controller via serial, ssh or telnet") print("--tty Serial port for accessing AP") print("-l|--log file: log messages here") - print("-b|--band: a (5Ghz) or b (2.4Ghz) or abgn for dual-band 2.4Ghz AP") + print("-b|--baud: serial baud rate") print("-z|--action: action") print("-h|--help") diff --git a/influxgrafanaghost_fedora_install.sh b/influxgrafanaghost_fedora_install.sh new file mode 100755 index 00000000..62a9def3 --- /dev/null +++ b/influxgrafanaghost_fedora_install.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# This bash script installs Influx, Grafana, and Ghost on Fedora or CentOS. +# Run this script as a normal user with sudo access. +# You need to provide your username at the beginning of the script. +# There are a few fields you will need to enter when this installs Ghost, and you will be prompted by the script. +# Many scripts in this library are built around Influx, Grafana, and Ghost. Influx is a time series database, +# Grafana has dashboards which display the data stored in Influx, +# and Ghost is a blogging platform which creates an easy way for a user to view automated reports which are built using LANforge scripts +# Once a user uses this script, the user can use those features with the credentials for the system this script sets up. + +# After running this script, Grafana is at port 3000, Influx is at port 8086, and Ghost is at port 2368 +# The user will need to login to those through a web browser to create login credentials, and find API tokens. +# These API tokens are needed to run many scripts in LANforge scripts with these three programs. 
+ +echo Type in your username here +read -r USER + +#Influx installation +wget https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.4.x86_64.rpm +sudo yum localinstall influxdb2-2.0.4.x86_64.rpm +sudo service influxdb start +sudo service influxdb enable + +#Grafana installation +wget https://dl.grafana.com/oss/release/grafana-7.5.3-1.x86_64.rpm +sudo yum localinstall grafana-7.5.3-1.x86_64.rpm -y +sudo systemctl start grafana-server +sudo systemctl enable grafana-server + +#Ghost installation +sudo adduser ghost +sudo usermod -aG sudo ghost +sudo ufw allow 'Nginx Full' +curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash +sudo npm install ghost-cli@latest -g +# Ensure that NPM is up to date +npm cache verify +sudo npm install -g n +sudo n stable +curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash +npm install ghost-cli@latest -g +sudo mkdir -p /var/www/ghostsite +sudo chown ${USER}:${USER} /var/www/ghostsite +sudo chmod 775 /var/www/ghostsite +cd /var/www/ghostsite +ghost install local \ No newline at end of file diff --git a/influxgrafanaghost_ubuntu_install.sh b/influxgrafanaghost_ubuntu_install.sh new file mode 100755 index 00000000..5220db80 --- /dev/null +++ b/influxgrafanaghost_ubuntu_install.sh @@ -0,0 +1,52 @@ +#!/bin/bash +#This script installs Influx, Grafana, and Ghost on Ubuntu. +#Run this script as a normal user with sudo access. +#You need to provide your username at the beginning of the script. +#There are a few fields you will need to enter when it is installing Ghost, and you will be prompted by the script. +#Lanforge scripts is built around Influx, Grafana, and Ghost. Influx is a time series database, +#Grafana has dashboards which display the data stored in Influx, +#and Ghost is a blogging platform which creates an easy way for a user to view automated reports which are built using LANforge scripts +#Once a user uses this script, the user can use those features with the credentials for the system this script sets up. 
+ +#After running this script, Grafana is accessible through port 3000, Influx is at port 8086, and Ghost is accessible at 2368 +#The user will need to login to those through a web browser to create login credentials, and find API tokens. +#These API tokens are needed to run many scripts in LANforge scripts with the functionality these three programs provide. + +#Update necessary parts of system +echo Type in your username here +read -r USER + +sudo apt-get update && sudo apt-get upgrade -y +sudo apt-get install nginx mysql-server nodejs npm -y + +#Influx installation +wget https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.7-amd64.deb +sudo dpkg -i influxdb2-2.0.7-amd64.deb +sudo systemctl unmask influxdb +sudo systemctl start influxdb +sudo systemctl enable influxdb + +#Grafana installation +sudo apt-get install -y adduser libfontconfig1 +wget https://dl.grafana.com/oss/release/grafana_8.0.5_amd64.deb +sudo dpkg -i grafana_8.0.5_amd64.deb +sudo systemctl start grafana-server +sudo systemctl enable grafana-server + +#Ghost installation +sudo adduser ghost +sudo usermod -aG sudo ghost +sudo ufw allow 'Nginx Full' +curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash +sudo npm install ghost-cli@latest -g +# Ensure that NPM is up to date +npm cache verify +sudo npm install -g n +sudo n stable +curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash +npm install ghost-cli@latest -g +sudo mkdir -p /var/www/ghostsite +sudo chown ${USER}:${USER} /var/www/ghostsite +sudo chmod 775 /var/www/ghostsite +cd /var/www/ghostsite +ghost install local \ No newline at end of file diff --git a/py-dashboard/GhostRequest.py b/py-dashboard/GhostRequest.py index aa593cdc..23f08522 100644 --- a/py-dashboard/GhostRequest.py +++ b/py-dashboard/GhostRequest.py @@ -3,7 +3,7 @@ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Class holds default settings for json requests to Ghost - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-import ast + import os import sys @@ -14,18 +14,23 @@ if sys.version_info[0] != 3: import requests import jwt -from datetime import datetime as date +from datetime import datetime +from dateutil import tz import json import subprocess from scp import SCPClient import paramiko from GrafanaRequest import GrafanaRequest +from influx2 import RecordInflux +import time +from collections import Counter +import shutil class CSVReader: def read_csv(self, file, - sep=','): + sep='\t'): df = open(file).read().split('\n') rows = list() for x in df: @@ -44,6 +49,70 @@ class CSVReader: values.append(row[index]) return values + def get_columns(self, df, targets): + target_index = [] + for item in targets: + target_index.append(df[0].index(item)) + results = [] + for row in df: + row_data = [] + for x in target_index: + row_data.append(row[x]) + results.append(row_data) + return results + + def to_html(self, df): + html = '' + html = html + ('' + '' + '' + '' + '' + '' + '' + '') + for row in df: + for item in row: + html = html + ('' % item) + html = html + ('\n') + html = html + ('' + '
%s
') + return html + + def filter_df(self, df, column, expression, target): + target_index = df[0].index(column) + counter = 0 + targets = [0] + for row in df: + try: + if expression == 'less than': + if float(row[target_index]) < target: + targets.append(counter) + counter += 1 + else: + counter += 1 + if expression == 'greater than': + if float(row[target_index]) > target: + targets.append(counter) + counter += 1 + else: + counter += 1 + if expression == 'greater than or equal to': + if float(row[target_index]) >= target: + targets.append(counter) + counter += 1 + else: + counter += 1 + except: + counter += 1 + return list(map(df.__getitem__, targets)) + + def concat(self, dfs): + final_df = dfs[0] + for df in dfs[1:]: + final_df = final_df + df[1:] + return final_df + class GhostRequest: def __init__(self, @@ -52,7 +121,12 @@ class GhostRequest: _api_token=None, _overwrite='false', debug_=False, - die_on_error_=False): + die_on_error_=False, + influx_host=None, + influx_port=8086, + influx_org=None, + influx_token=None, + influx_bucket=None): self.debug = debug_ self.die_on_error = die_on_error_ self.ghost_json_host = _ghost_json_host @@ -64,6 +138,11 @@ class GhostRequest: self.api_token = _api_token self.images = list() self.pdfs = list() + self.influx_host = influx_host + self.influx_port = influx_port + self.influx_org = influx_org + self.influx_token = influx_token + self.influx_bucket = influx_bucket def encode_token(self): @@ -71,7 +150,7 @@ class GhostRequest: key_id, secret = self.api_token.split(':') # Prepare header and payload - iat = int(date.now().timestamp()) + iat = int(datetime.now().timestamp()) header = {'alg': 'HS256', 'typ': 'JWT', 'kid': key_id} payload = { @@ -147,111 +226,265 @@ class GhostRequest: tags='custom', authors=authors) - def wifi_capacity_to_ghost(self, - authors, - folders, - title=None, - server_pull=None, - ghost_host=None, - port='22', - user_pull='lanforge', - password_pull='lanforge', - user_push=None, - password_push=None, 
- customer=None, - testbed='Unknown Testbed', - test_run=None, - target_folders=list(), - grafana_dashboard=None, - grafana_token=None, - grafana_host=None, - grafana_port=3000): + def list_append(self, list_1, value): + list_1.append(value) + + def kpi_to_ghost(self, + authors, + folders, + parent_folder=None, + title=None, + server_pull=None, + ghost_host=None, + port=22, + user_push=None, + password_push=None, + customer=None, + testbed=None, + test_run=None, + target_folders=list(), + grafana_token=None, + grafana_host=None, + grafana_port=3000, + grafana_datasource='InfluxDB', + grafana_bucket=None): + global dut_hw, dut_sw, dut_model, dut_serial + + now = datetime.now() + text = '' csvreader = CSVReader() - if test_run is None: - test_run = sorted(folders)[0].split('/')[-1].strip('/') - for folder in folders: - print(folder) - ssh_pull = paramiko.SSHClient() - ssh_pull.set_missing_host_key_policy(paramiko.client.AutoAddPolicy) - ssh_pull.connect(server_pull, - port, - username=user_pull, - password=password_pull, - allow_agent=False, - look_for_keys=False) - scp_pull = SCPClient(ssh_pull.get_transport()) - scp_pull.get(folder, recursive=True) - target_folder = str(folder).rstrip('/').split('/')[-1] - target_folders.append(target_folder) - print(target_folder) + if grafana_token is not None: + grafana = GrafanaRequest(grafana_token, + grafana_host, + grafanajson_port=grafana_port + ) + if self.debug: + print('Folders: %s' % folders) + + ssh_push = paramiko.SSHClient() + ssh_push.set_missing_host_key_policy(paramiko.client.AutoAddPolicy) + ssh_push.connect(ghost_host, + port, + username=user_push, + password=password_push, + allow_agent=False, + look_for_keys=False) + scp_push = SCPClient(ssh_push.get_transport()) + + if parent_folder is not None: + print("parent_folder %s" % parent_folder) + files = os.listdir(parent_folder) + print(files) + for file in files: + if os.path.isdir(parent_folder + '/' + file) is True: + if os.path.exists(file): + 
shutil.rmtree(file) + shutil.copytree(parent_folder + '/' + file, file) + target_folders.append(file) + print('Target folders: %s' % target_folders) + else: + for folder in folders: + if self.debug: + print(folder) + target_folders.append(folder) + + testbeds = list() + pdfs = list() + high_priority_list = list() + low_priority_list = list() + images = list() + times = list() + test_pass_fail = list() + + for target_folder in target_folders: try: target_file = '%s/kpi.csv' % target_folder - print('target file %s' % target_file) df = csvreader.read_csv(file=target_file, sep='\t') - csv_testbed = csvreader.get_column(df, 'test-rig')[0] - print(csv_testbed) - except: - pass - if len(csv_testbed) > 2: - testbed = csv_testbed - text = text + 'Testbed: %s
' % testbed - if testbed == 'Unknown Testbed': - raise UserWarning('Please define your testbed') - print('testbed %s' % testbed) + test_rig = csvreader.get_column(df, 'test-rig')[0] + pass_fail = Counter(csvreader.get_column(df, 'pass/fail')) + test_pass_fail.append(pass_fail) + dut_hw = csvreader.get_column(df, 'dut-hw-version')[0] + dut_sw = csvreader.get_column(df, 'dut-sw-version')[0] + dut_model = csvreader.get_column(df, 'dut-model-num')[0] + dut_serial = csvreader.get_column(df, 'dut-serial-num')[0] + duts = [dut_serial, dut_hw, dut_sw, dut_model, test_rig] + times_append = csvreader.get_column(df, 'Date') + for target_time in times_append: + times.append(float(target_time) / 1000) + if pass_fail['PASS'] + pass_fail['FAIL'] > 0: + text = text + 'Tests passed: %s
' % pass_fail['PASS'] + text = text + 'Tests failed: %s
' % pass_fail['FAIL'] + text = text + 'Percentage of tests passed: %s
' % ( + pass_fail['PASS'] / (pass_fail['PASS'] + pass_fail['FAIL'])) + else: + text = text + 'Tests passed: 0
' \ + 'Tests failed : 0
' \ + 'Percentage of tests passed: Not Applicable
' + + except: + print("Failure") + target_folders.remove(target_folder) + break + testbeds.append(test_rig) + if testbed is None: + testbed = test_rig + + if test_run is None: + test_run = now.strftime('%B-%d-%Y-%I-%M-%p-report') - ssh_push = paramiko.SSHClient() - ssh_push.set_missing_host_key_policy(paramiko.client.AutoAddPolicy) - ssh_push.connect(ghost_host, - port, - username=user_push, - password=password_push, - allow_agent=False, - look_for_keys=False) - scp_push = SCPClient(ssh_push.get_transport()) local_path = '/home/%s/%s/%s/%s' % (user_push, customer, testbed, test_run) - transport = paramiko.Transport((ghost_host, port)) + + transport = paramiko.Transport(ghost_host, port) transport.connect(None, user_push, password_push) sftp = paramiko.sftp_client.SFTPClient.from_transport(transport) - print(local_path) + + if self.debug: + print(local_path) + print(target_folder) + + try: + sftp.mkdir('/home/%s/%s/%s' % (user_push, customer, testbed)) + except: + pass + try: sftp.mkdir(local_path) except: - print('folder %s already exists' % local_path) - scp_push.put(target_folder, recursive=True, remote_path=local_path) + pass + scp_push.put(target_folder, local_path, recursive=True) files = sftp.listdir(local_path + '/' + target_folder) - # print('Files: %s' % files) for file in files: if 'pdf' in file: url = 'http://%s/%s/%s/%s/%s/%s' % ( ghost_host, customer.strip('/'), testbed, test_run, target_folder, file) - text = text + 'PDF of results: %s
' % (url, file) - print(url) - scp_pull.close() + pdfs.append('PDF of results: %s
' % (url, file)) scp_push.close() self.upload_images(target_folder) for image in self.images: if 'kpi-' in image: if '-print' not in image: - text = text + '' % image + images.append('' % image) self.images = [] - if grafana_token is not None: - GR = GrafanaRequest(grafana_token, - grafana_host, - grafanajson_port=grafana_port - ) - GR.create_snapshot(title=grafana_dashboard) - snapshot = GR.list_snapshots()[-1] - text = text + '' % (snapshot['externalUrl'], '%') + results = csvreader.get_columns(df, ['short-description', 'numeric-score', 'test details', 'pass/fail', + 'test-priority']) - now = date.now() + results[0] = ['Short Description', 'Score', 'Test Details', 'Pass or Fail', 'test-priority'] + + low_priority = csvreader.filter_df(results, 'test-priority', 'less than', 94) + high_priority = csvreader.filter_df(results, 'test-priority', 'greater than or equal to', 95) + high_priority_list.append(high_priority) + + low_priority_list.append(low_priority) + + + test_pass_fail_results = sum((Counter(test) for test in test_pass_fail), Counter()) + + end_time = max(times) + start_time = '2021-07-01' + end_time = datetime.utcfromtimestamp(end_time)#.strftime('%Y-%m-%d %H:%M:%S') + now = time.time() + offset = datetime.fromtimestamp(now) - datetime.utcfromtimestamp(now) + end_time = end_time + offset + + high_priority = csvreader.concat(high_priority_list) + low_priority = csvreader.concat(low_priority_list) + + high_priority = csvreader.get_columns(high_priority, + ['Short Description', 'Score', 'Test Details']) + low_priority = csvreader.get_columns(low_priority, + ['Short Description', 'Score', 'Test Details']) + high_priority.append(['Total Passed', test_pass_fail_results['PASS'], 'Total subtests passed during this run']) + high_priority.append(['Total Failed', test_pass_fail_results['FAIL'], 'Total subtests failed during this run']) if title is None: - title = "%s %s %s %s:%s report" % (now.day, now.month, now.year, now.hour, now.minute) + title = 
end_time.strftime('%B %d, %Y %I:%M %p report') - if grafana_dashboard is not None: - pass + # create Grafana Dashboard + target_files = [] + for folder in target_folders: + target_files.append(folder.split('/')[-1] + '/kpi.csv') + if self.debug: + print('Target files: %s' % target_files) + grafana.create_custom_dashboard(target_csvs=target_files, + title=title, + datasource=grafana_datasource, + bucket=grafana_bucket, + from_date=start_time, + to_date=end_time.strftime('%Y-%m-%d %H:%M:%S'), + pass_fail='GhostRequest', + testbed=testbeds[0]) + + if self.influx_token is not None: + influxdb = RecordInflux(_influx_host=self.influx_host, + _influx_port=self.influx_port, + _influx_org=self.influx_org, + _influx_token=self.influx_token, + _influx_bucket=self.influx_bucket) + short_description = 'Ghost Post Tests passed' # variable name + numeric_score = test_pass_fail_results['PASS'] # value + tags = dict() + print(datetime.utcfromtimestamp(max(times))) + tags['testbed'] = testbeds[0] + tags['script'] = 'GhostRequest' + tags['Graph-Group'] = 'PASS' + date = datetime.utcfromtimestamp(max(times)).isoformat() + influxdb.post_to_influx(short_description, numeric_score, tags, date) + + short_description = 'Ghost Post Tests failed' # variable name + numeric_score = test_pass_fail_results['FAIL'] # value + tags = dict() + tags['testbed'] = testbeds[0] + tags['script'] = 'GhostRequest' + tags['Graph-Group'] = 'FAIL' + date = datetime.utcfromtimestamp(max(times)).isoformat() + influxdb.post_to_influx(short_description, numeric_score, tags, date) + + text = 'Testbed: %s
' % testbeds[0] + dut_table = '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' % ( + duts[4], duts[1], duts[2], duts[3], duts[0], test_pass_fail_results['PASS'], + test_pass_fail_results['FAIL']) + + dut_table = dut_table + '
Test Information
Testbed%s
DUT_HW%s
DUT_SW%s
DUT model%s
DUT Serial%s
Tests passed%s
Tests failed%s
' + text = text + dut_table + + for pdf in pdfs: + print(pdf) + text = text + pdf + + for image in images: + text = text + image + + text = text + 'High priority results: %s' % csvreader.to_html(high_priority) + + if grafana_token is not None: + # get the details of the dashboard through the API, and set the end date to the youngest KPI + grafana.list_dashboards() + + grafana.create_snapshot(title='Testbed: ' + title) + time.sleep(3) + snapshot = grafana.list_snapshots()[-1] + text = text + '
' % ( + grafana_host, snapshot['key'], '%') + + text = text + 'Low priority results: %s' % csvreader.to_html(low_priority) self.create_post(title=title, text=text, diff --git a/py-dashboard/GrafanaRequest.py b/py-dashboard/GrafanaRequest.py index fc585ab6..a6349938 100644 --- a/py-dashboard/GrafanaRequest.py +++ b/py-dashboard/GrafanaRequest.py @@ -12,6 +12,35 @@ if sys.version_info[0] != 3: import requests import json +import string +import random + + +class CSVReader: + def __init__(self): + self.shape = None + + def read_csv(self, + file, + sep='\t'): + df = open(file).read().split('\n') + rows = list() + for x in df: + if len(x) > 0: + rows.append(x.split(sep)) + length = list(range(0, len(df[0]))) + columns = dict(zip(df[0], length)) + self.shape = (length, columns) + return rows + + def get_column(self, + df, + value): + index = df[0].index(value) + values = [] + for row in df[1:]: + values.append(row[index]) + return values class GrafanaRequest: @@ -35,6 +64,8 @@ class GrafanaRequest: self.grafanajson_url = "http://%s:%s" % (_grafanajson_host, grafanajson_port) self.data = dict() self.data['overwrite'] = _overwrite + self.csvreader = CSVReader() + self.units = dict() def create_bucket(self, bucket_name=None): @@ -45,7 +76,7 @@ class GrafanaRequest: def list_dashboards(self): url = self.grafanajson_url + '/api/search' print(url) - return json.loads(requests.get(url,headers=self.headers).text) + return json.loads(requests.get(url, headers=self.headers).text) def create_dashboard(self, dashboard_name=None, @@ -77,32 +108,277 @@ class GrafanaRequest: datastore['dashboard'] = dashboard datastore['overwrite'] = False data = json.dumps(datastore, indent=4) - #return print(data) return requests.post(grafanajson_url, headers=self.headers, data=data, verify=False) def create_dashboard_from_dict(self, - dictionary=None): + dictionary=None, + overwrite=False): grafanajson_url = self.grafanajson_url + '/api/dashboards/db' datastore = dict() dashboard = 
dict(json.loads(dictionary)) datastore['dashboard'] = dashboard - datastore['overwrite'] = False + datastore['overwrite'] = overwrite data = json.dumps(datastore, indent=4) - #return print(data) return requests.post(grafanajson_url, headers=self.headers, data=data, verify=False) + def get_graph_groups(self, target_csvs): # Get the unique values in the Graph-Group column + dictionary = dict() + for target_csv in target_csvs: + if len(target_csv) > 1: + csv = self.csvreader.read_csv(target_csv) + # Unique values in the test-id column + scripts = list(set(self.csvreader.get_column(csv, 'test-id'))) + # we need to make sure we match each Graph Group to the script it occurs in + for script in scripts: + # Unique Graph Groups for each script + graph_groups = self.csvreader.get_column(csv, 'Graph-Group') + dictionary[script] = list(set(graph_groups)) + units = self.csvreader.get_column(csv, 'Units') + self.units[script] = dict() + for index in range(0, len(graph_groups)): + self.units[script][graph_groups[index]] = units[index] + print(dictionary) + return dictionary + + def maketargets(self, + bucket, + scriptname, + groupBy, + index, + graph_group, + testbed): + query = ( + 'from(bucket: "%s")\n ' + '|> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n ' + '|> filter(fn: (r) => r["script"] == "%s")\n ' + '|> group(columns: ["_measurement"])\n ' + % (bucket, scriptname)) + queryend = ('|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n ' + '|> yield(name: "mean")\n ') + if graph_group is not None: + graphgroup = ('|> filter(fn: (r) => r["Graph-Group"] == "%s")\n' % graph_group) + query += graphgroup + if testbed is not None: + query += ('|> filter(fn: (r) => r["testbed"] == "%s")\n' % testbed) + targets = dict() + targets['delimiter'] = ',' + targets['groupBy'] = groupBy + targets['header'] = True + targets['ignoreUnknown'] = False + targets['orderByTime'] = 'ASC' + targets['policy'] = 'default' + targets['query'] = query + queryend + 
targets['refId'] = dict(enumerate(string.ascii_uppercase, 1))[index + 1] + targets['resultFormat'] = "time_series" + targets['schema'] = list() + targets['skipRows'] = 0 + targets['tags'] = list() + return targets + + def groupby(self, params, grouptype): + dic = dict() + dic['params'] = list() + dic['params'].append(params) + dic['type'] = grouptype + return dic def create_custom_dashboard(self, - datastore=None): - data = json.dumps(datastore, indent=4) - return requests.post(self.grafanajson_url, headers=self.headers, data=data, verify=False) + scripts=None, + title=None, + bucket=None, + graph_groups=None, + graph_groups_file=None, + target_csvs=None, + testbed=None, + datasource='InfluxDB', + from_date='now-1y', + to_date='now', + graph_height=8, + graph__width=12, + pass_fail=None): + options = string.ascii_lowercase + string.ascii_uppercase + string.digits + uid = ''.join(random.choice(options) for i in range(9)) + input1 = dict() + annotations = dict() + annotations['builtIn'] = 1 + annotations['datasource'] = '-- Grafana --' + annotations['enable'] = True + annotations['hide'] = True + annotations['iconColor'] = 'rgba(0, 211, 255, 1)' + annotations['name'] = 'Annotations & Alerts' + annotations['type'] = 'dashboard' + annot = dict() + annot['list'] = list() + annot['list'].append(annotations) + + templating = dict() + templating['list'] = list() + + timedict = dict() + timedict['from'] = from_date + timedict['to'] = to_date + + panels = list() + index = 1 + if graph_groups_file: + print("graph_groups_file: %s" % graph_groups_file) + target_csvs = open(graph_groups_file).read().split('\n') + graph_groups = self.get_graph_groups( + target_csvs) # Get the list of graph groups which are in the tests we ran + if target_csvs: + print('Target CSVs: %s' % target_csvs) + graph_groups = self.get_graph_groups( + target_csvs) # Get the list of graph groups which are in the tests we ran + if pass_fail is not None: + graph_groups[pass_fail] = ['PASS', 'FAIL'] + + for 
scriptname in graph_groups.keys(): + print(scriptname) + for graph_group in graph_groups[scriptname]: + panel = dict() + + gridpos = dict() + gridpos['h'] = graph_height + gridpos['w'] = graph__width + gridpos['x'] = 0 + gridpos['y'] = 0 + + legend = dict() + legend['avg'] = False + legend['current'] = False + legend['max'] = False + legend['min'] = False + legend['show'] = True + legend['total'] = False + legend['values'] = False + + options = dict() + options['alertThreshold'] = True + + groupBy = list() + groupBy.append(self.groupby('$__interval', 'time')) + groupBy.append(self.groupby('null', 'fill')) + + targets = list() + counter = 0 + new_target = self.maketargets(bucket, scriptname, groupBy, counter, graph_group, testbed) + targets.append(new_target) + + fieldConfig = dict() + fieldConfig['defaults'] = dict() + fieldConfig['overrides'] = list() + + transformation = dict() + transformation['id'] = "renameByRegex" + transformation_options = dict() + transformation_options['regex'] = "(.*) value.*" + transformation_options['renamePattern'] = "$1" + transformation['options'] = transformation_options + + xaxis = dict() + xaxis['buckets'] = None + xaxis['mode'] = "time" + xaxis['name'] = None + xaxis['show'] = True + xaxis['values'] = list() + + yaxis = dict() + yaxis['format'] = 'short' + try: + yaxis['label'] = self.units[scriptname][graph_group] + except: + pass + yaxis['logBase'] = 1 + yaxis['max'] = None + yaxis['min'] = None + yaxis['show'] = True + + yaxis1 = dict() + yaxis1['align'] = False + yaxis1['alignLevel'] = None + + panel['aliasColors'] = dict() + panel['bars'] = False + panel['dashes'] = False + panel['dashLength'] = 10 + panel['datasource'] = datasource + panel['fieldConfig'] = fieldConfig + panel['fill'] = 0 + panel['fillGradient'] = 0 + panel['gridPos'] = gridpos + panel['hiddenSeries'] = False + panel['id'] = index + panel['legend'] = legend + panel['lines'] = True + panel['linewidth'] = 1 + panel['nullPointMode'] = 'null' + panel['options'] 
= options + panel['percentage'] = False + panel['pluginVersion'] = '7.5.4' + panel['pointradius'] = 2 + panel['points'] = True + panel['renderer'] = 'flot' + panel['seriesOverrides'] = list() + panel['spaceLength'] = 10 + panel['stack'] = False + panel['steppedLine'] = False + panel['targets'] = targets + panel['thresholds'] = list() + panel['timeFrom'] = None + panel['timeRegions'] = list() + panel['timeShift'] = None + if graph_group is not None: + panel['title'] = scriptname + ' ' + graph_group + else: + panel['title'] = scriptname + if 'PASS' in panel['title']: + panel['title'] = 'Total Passed' + if 'FAIL' in panel['title']: + panel['title'] = 'Total Failed' + panel['transformations'] = list() + panel['transformations'].append(transformation) + panel['type'] = "graph" + panel['xaxis'] = xaxis + panel['yaxes'] = list() + panel['yaxes'].append(yaxis) + panel['yaxes'].append(yaxis) + panel['yaxis'] = yaxis1 + + panels.append(panel) + index = index + 1 + input1['annotations'] = annot + input1['editable'] = True + input1['gnetId'] = None + input1['graphTooltip'] = 0 + input1['links'] = list() + input1['panels'] = panels + input1['refresh'] = False + input1['schemaVersion'] = 27 + input1['style'] = 'dark' + input1['tags'] = list() + input1['templating'] = templating + input1['time'] = timedict + input1['timepicker'] = dict() + input1['timezone'] = '' + input1['title'] = ("Testbed: %s" % title) + input1['uid'] = uid + input1['version'] = 11 + return self.create_dashboard_from_dict(dictionary=json.dumps(input1)) + + # def create_custom_dashboard(self, + # datastore=None): + # data = json.dumps(datastore, indent=4) + # return requests.post(self.grafanajson_url, headers=self.headers, data=data, verify=False) def create_snapshot(self, title): + print('create snapshot') grafanajson_url = self.grafanajson_url + '/api/snapshots' - data=self.get_dashboard(title) - data['expires'] = 3600 - data['external'] = True - print(data) + data = self.get_dashboard(title) + 
data['expires'] = 360000 + data['external'] = False + data['timeout'] = 15 + if self.debug: + print(data) return requests.post(grafanajson_url, headers=self.headers, json=data, verify=False).text def list_snapshots(self): @@ -112,9 +388,21 @@ class GrafanaRequest: def get_dashboard(self, target): dashboards = self.list_dashboards() + print(target) for dashboard in dashboards: if dashboard['title'] == target: uid = dashboard['uid'] grafanajson_url = self.grafanajson_url + '/api/dashboards/uid/' + uid print(grafanajson_url) - return json.loads(requests.get(grafanajson_url, headers=self.headers, verify=False).text) \ No newline at end of file + return json.loads(requests.get(grafanajson_url, headers=self.headers, verify=False).text) + + def get_units(self, csv): + df = self.csvreader.read_csv(csv) + units = self.csvreader.get_column(df, 'Units') + test_id = self.csvreader.get_column(df, 'test-id') + maxunit = max(set(units), key=units.count) + maxtest = max(set(test_id), key=test_id.count) + d = dict() + d[maxunit] = maxtest + print(maxunit, maxtest) + return d diff --git a/py-json/LANforge/LFUtils.py b/py-json/LANforge/LFUtils.py index 464c1e21..7005425e 100644 --- a/py-json/LANforge/LFUtils.py +++ b/py-json/LANforge/LFUtils.py @@ -746,4 +746,58 @@ def exec_wrap(cmd): print("\nError with '" + cmd + "', bye\n") exit(1) + +def expand_endp_histogram(distribution_payload=None): + """ + Layer 3 endpoints can contain DistributionPayloads that appear like + "rx-silence-5m" : { + # "histo_category_width" : 1, + # "histogram" : [ + # 221, + # 113, + # 266, + # 615, + # 16309, + # 56853, + # 7954, + # 1894, + # 29246, + # 118, + # 12, + # 2, + # 0, + # 0, + # 0, + # 0 + # ], + # "time window ms" : 300000, + # "window avg" : 210.285, + # "window max" : 228, + # "window min" : 193 + + These histogbrams are a set of linear categorys roughly power-of-two categories. 
+ :param distribution_payload: dictionary requiring histo_category_width and histogram + :return: dictionary containing expanded category ranges and values for categories + """ + if distribution_payload is None: + return None + if ("histogram" not in distribution_payload) \ + or ("histo_category_width" not in distribution_payload): + raise ValueError("Unexpected histogram format.") + multiplier = int(distribution_payload["histo_category_width"]) + formatted_dict = { + #"00000 <= x <= 00001" : "0" + } + for bucket_index in range(len(distribution_payload["histogram"]) - 1): + pow1 = (2**bucket_index) * multiplier + pow2 = (2**(bucket_index+1)) * multiplier + if bucket_index == 0: + category_name = "00000 <= x <= {:-05.0f}".format(pow2) + else: + category_name = "{:-05.0f} < x <= {:-05.0f}".format(pow1, pow2) + formatted_dict[category_name] = distribution_payload["histogram"][bucket_index] + + pprint.pprint([("historgram", distribution_payload["histogram"]), + ("formatted", formatted_dict)]) + return formatted_dict ### diff --git a/py-json/create_wanlink.py b/py-json/create_wanlink.py index 94a16ef5..fe9839df 100755 --- a/py-json/create_wanlink.py +++ b/py-json/create_wanlink.py @@ -1,17 +1,12 @@ #!/usr/bin/python3 - # Create and modify WAN Links Using LANforge JSON AP : http://www.candelatech.com/cookbook.php?vol=cli&book=JSON:+Managing+WANlinks+using+JSON+and+Python - # Written by Candela Technologies Inc. 
-# Updated by: - +# Updated by: Erin Grimes import sys import urllib - if sys.version_info[0] != 3: print("This script requires Python 3") exit() - import time from time import sleep from urllib import error @@ -22,19 +17,22 @@ from LANforge import LFUtils from LANforge.LFUtils import NA j_printer = pprint.PrettyPrinter(indent=2) -# typically you're using resource 1 in stand alone realm +# todo: this needs to change resource_id = 1 -def main(base_url="http://localhost:8080"): +def main(base_url="http://localhost:8080", args={}): json_post = "" json_response = "" num_wanlinks = -1 + # see if there are old wanlinks to remove lf_r = LFRequest.LFRequest(base_url+"/wl/list") print(lf_r.get_as_json()) + # ports to set as endpoints port_a ="rd0a" port_b ="rd1a" + try: json_response = lf_r.getAsJson() LFUtils.debug_printer.pprint(json_response) @@ -71,13 +69,12 @@ def main(base_url="http://localhost:8080"): # create wanlink 1a lf_r = LFRequest.LFRequest(base_url+"/cli-json/add_wl_endp") lf_r.addPostData({ - 'alias': 'wl_eg1-A', - 'shelf': 1, - 'resource': '1', - 'port': port_a, - 'latency': '75', - 'max_rate': '128000', - 'description': 'cookbook-example' + 'alias': 'wl_eg1-A', + 'shelf': 1, + 'resource': '1', + 'port': port_a, + 'latency': args['latency_A'], + 'max_rate': args['rate_A'] }) lf_r.jsonPost() sleep(0.05) @@ -85,13 +82,12 @@ def main(base_url="http://localhost:8080"): # create wanlink 1b lf_r = LFRequest.LFRequest(base_url+"/cli-json/add_wl_endp") lf_r.addPostData({ - 'alias': 'wl_eg1-B', - 'shelf': 1, - 'resource': '1', - 'port': port_b, - 'latency': '95', - 'max_rate': '256000', - 'description': 'cookbook-example' + 'alias': 'wl_eg1-B', + 'shelf': 1, + 'resource': '1', + 'port': port_b, + 'latency': args['latency_B'], + 'max_rate': args['rate_B'] }) lf_r.jsonPost() sleep(0.05) @@ -134,6 +130,7 @@ def main(base_url="http://localhost:8080"): continue print("starting wanlink:") + # print("the latency is {laten}".format(laten=latency)) lf_r = 
LFRequest.LFRequest(base_url+"/cli-json/set_cx_state") lf_r.addPostData({ 'test_mgr': 'all', @@ -163,25 +160,7 @@ def main(base_url="http://localhost:8080"): print("Error code "+error.code) continue - print("Wanlink is running, wait one sec...") - sleep(1) - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Now we can alter the delay and speed of the wanlink by - # updating its endpoints see https://www.candelatech.com/lfcli_ug.php#set_wanlink_info - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - print("Updating Wanlink...") - lf_r = LFRequest.LFRequest(base_url+"/cli-json/set_wanlink_info") - lf_r.addPostData({ - 'name': 'wl_eg1-A', - 'speed': 265333, - 'latency': 30, - 'reorder_freq': 3200, # thats 3200/1000000 - 'drop_freq': 2000, # 2000/1000000 - 'dup_freq': 1325, # 1325/1000000 - 'jitter_freq': 25125, # 25125/1000000 - }) - lf_r.jsonPost() - sleep(1) + print("Wanlink is running") # stop wanlink lf_r = LFRequest.LFRequest(base_url+"/cli-json/set_cx_state") @@ -214,22 +193,19 @@ def main(base_url="http://localhost:8080"): print("Wanlink is stopped.") - print("Wanlink info:") - lf_r = LFRequest.LFRequest(base_url+"/wl/wl_eg1") - json_response = lf_r.getAsJson() - LFUtils.debug_printer.pprint(json_response) + # print("Wanlink info:") + # lf_r = LFRequest.LFRequest(base_url+"/wl/wl_eg1") + # json_response = lf_r.getAsJson() + # LFUtils.debug_printer.pprint(json_response) - lf_r = LFRequest.LFRequest(base_url+"/wl_ep/wl_eg1-A") - json_response = lf_r.getAsJson() - LFUtils.debug_printer.pprint(json_response) + # lf_r = LFRequest.LFRequest(base_url+"/wl_ep/wl_eg1-A") + # json_response = lf_r.getAsJson() + # LFUtils.debug_printer.pprint(json_response) - lf_r = LFRequest.LFRequest(base_url+"/wl_ep/wl_eg1-B") - json_response = lf_r.getAsJson() - LFUtils.debug_printer.pprint(json_response) + # lf_r = LFRequest.LFRequest(base_url+"/wl_ep/wl_eg1-B") + # json_response = lf_r.getAsJson() + # 
LFUtils.debug_printer.pprint(json_response) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if __name__ == '__main__': main() - -### -### \ No newline at end of file diff --git a/py-json/cv_test_manager.py b/py-json/cv_test_manager.py index f681d5a5..0354839e 100644 --- a/py-json/cv_test_manager.py +++ b/py-json/cv_test_manager.py @@ -5,13 +5,12 @@ Note: This script is working as library for chamberview tests. import time -from LANforge.lfcli_base import LFCliBase from realm import Realm import json from pprint import pprint -import argparse from cv_test_reports import lanforge_reports as lf_rpt from csv_to_influx import * +import os.path def cv_base_adjust_parser(args): @@ -67,12 +66,14 @@ class cv_test(Realm): def __init__(self, lfclient_host="localhost", lfclient_port=8080, - report_dir="" + lf_report_dir="", + debug=False ): super().__init__(lfclient_host=lfclient_host, lfclient_port=lfclient_port) - self.report_dir = report_dir + self.lf_report_dir = lf_report_dir self.report_name = None + self.debug = debug # Add a config line to a text blob. Will create new text blob # if none exists already. @@ -127,7 +128,7 @@ class cv_test(Realm): "cmd": command } debug_par = "" - rsp = self.json_post("/gui-json/cmd%s" % debug_par, data, debug_=False, response_json_list_=response_json) + rsp = self.json_post("/gui-json/cmd%s" % debug_par, data, debug_=self.debug, response_json_list_=response_json) try: if response_json[0]["LAST"]["warnings"].startswith("Unknown"): print("Unknown command?\n"); @@ -286,7 +287,7 @@ class cv_test(Realm): # cv_cmds: Array of raw chamber-view commands, such as "cv click 'button-name'" # These (and the sets) are applied after the test is created and before it is started. 
def create_and_run_test(self, load_old_cfg, test_name, instance_name, config_name, sets, - pull_report, lf_host, lf_user, lf_password, cv_cmds, local_path="", ssh_port=22, + pull_report, lf_host, lf_user, lf_password, cv_cmds, local_lf_report_dir="", ssh_port=22, graph_groups_file=None): load_old = "false" if load_old_cfg: @@ -349,12 +350,12 @@ class cv_test(Realm): filelocation.write(location + '/kpi.csv\n') filelocation.close() print(location) - self.report_dir = location + self.lf_report_dir = location if pull_report: try: print(lf_host) report.pull_reports(hostname=lf_host, username=lf_user, password=lf_password, - port=ssh_port, local_path=local_path, + port=ssh_port, report_dir=local_lf_report_dir, report_location=location) except Exception as e: print("SCP failed, user %s, password %s, dest %s", (lf_user, lf_password, lf_host)) @@ -385,7 +386,7 @@ class cv_test(Realm): # Takes cmd-line args struct or something that looks like it. # See csv_to_influx.py::influx_add_parser_args for options, or --help. def check_influx_kpi(self, args): - if self.report_dir == "": + if self.lf_report_dir == "": # Nothing to report on. print("Not submitting to influx, no report-dir.\n") return @@ -399,16 +400,21 @@ class cv_test(Realm): (args.influx_host, args.influx_port, args.influx_org, args.influx_token, args.influx_bucket)) # lfjson_host would be if we are reading out of LANforge or some other REST # source, which we are not. So dummy those out. 
- influxdb = RecordInflux(_lfjson_host="", - _lfjson_port="", - _influx_host=args.influx_host, + influxdb = RecordInflux(_influx_host=args.influx_host, _influx_port=args.influx_port, _influx_org=args.influx_org, _influx_token=args.influx_token, _influx_bucket=args.influx_bucket) - path = "%s/kpi.csv" % (self.report_dir) - + # lf_wifi_capacity_test.py may be run / initiated by a remote system against a lanforge + # the local_lf_report_dir is where the data is stored; if there is no local_lf_report_dir then the test is run directly on the lanforge. NOTE(review): self.local_lf_report_dir is never initialized in __init__ — verify before use + if self.local_lf_report_dir == "": + path = "%s/kpi.csv" % (self.lf_report_dir) + else: + kpi_location = self.local_lf_report_dir + "/" + os.path.basename(self.lf_report_dir) + # the local_lf_report_dir is the parent directory, need to get the directory name + path = "%s/kpi.csv" % (kpi_location) + print("Attempt to submit kpi: ", path) csvtoinflux = CSVtoInflux(influxdb=influxdb, target_csv=path, diff --git a/py-json/cv_test_reports.py b/py-json/cv_test_reports.py index e385057d..325bb9d9 100644 --- a/py-json/cv_test_reports.py +++ b/py-json/cv_test_reports.py @@ -5,13 +5,13 @@ class lanforge_reports: def pull_reports(self, hostname="localhost", port=22, username="lanforge", password="lanforge", report_location="/home/lanforge/html-reports/", - local_path="../../../reports/"): + report_dir="../../../reports/"): ssh = paramiko.SSHClient() ssh.load_system_host_keys() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.connect(hostname=hostname, username=username, password=password, port=port) + ssh.connect(hostname=hostname, username=username, password=password, port=port, allow_agent=False, look_for_keys=False) with SCPClient(ssh.get_transport()) as scp: - scp.get(remote_path=report_location, local_path=local_path, recursive=True) + scp.get(remote_path=report_location, local_path=report_dir, recursive=True) scp.close() diff --git a/py-json/gen_cxprofile.py b/py-json/gen_cxprofile.py index 811eccd6..0bea33a6 100644 ---
a/py-json/gen_cxprofile.py +++ b/py-json/gen_cxprofile.py @@ -152,8 +152,8 @@ class GenCXProfile(LFCliBase): count = 40 for i in range(0, count): port_info = self.local_realm.name_to_eid(sta_port) - resource = port_info[0] - shelf = port_info[1] + resource = port_info[1] + shelf = port_info[0] name = port_info[2] gen_name_a = "%s-%s" % (self.name_prefix, name) + "_" + str(i) + add @@ -167,8 +167,8 @@ class GenCXProfile(LFCliBase): for i in range(0, 5): port_info = self.local_realm.name_to_eid(port_name) try: - resource = port_info[0] - shelf = port_info[1] + resource = port_info[1] + shelf = port_info[0] name = port_info[2] except: raise ValueError("Unexpected name for port_name %s" % port_name) @@ -279,8 +279,8 @@ class GenCXProfile(LFCliBase): endp_tpls = [] for port_name in ports: port_info = self.local_realm.name_to_eid(port_name) - resource = port_info[0] - shelf = port_info[1] + resource = port_info[1] + shelf = port_info[0] name = port_info[2] # this naming convention follows what you see when you use diff --git a/py-json/l4_cxprofile.py b/py-json/l4_cxprofile.py index 0fccce11..87e327ca 100644 --- a/py-json/l4_cxprofile.py +++ b/py-json/l4_cxprofile.py @@ -21,6 +21,7 @@ class L4CXProfile(LFCliBase): self.local_realm = local_realm self.created_cx = {} self.created_endp = [] + self.test_type = "urls" self.lfclient_port = lfclient_port self.lfclient_host = lfclient_host @@ -69,6 +70,34 @@ class L4CXProfile(LFCliBase): print(".", end='') print("") + def compare_vals(self, old_list, new_list): + passes = 0 + expected_passes = 0 + if len(old_list) == len(new_list): + for item, value in old_list.items(): + expected_passes += 1 + if new_list[item] > old_list[item]: + passes += 1 + if passes == expected_passes: + return True + else: + return False + else: + return False + + def get_bytes(self): + time.sleep(1) + cx_list = self.json_get("layer4/list?fields=name,%s" % self.test_type, debug_=self.debug) + # print("==============\n", cx_list, "\n==============") + 
cx_map = {} + for cx_name in cx_list['endpoint']: + if cx_name != 'uri' and cx_name != 'handler': + for item, value in cx_name.items(): + for value_name, value_rx in value.items(): + if item in self.created_cx.keys() and value_name == self.test_type: + cx_map[item] = value_rx + return cx_map + def check_request_rate(self): endp_list = self.json_get("layer4/list?fields=urls/s") expected_passes = 0 @@ -83,12 +112,11 @@ class L4CXProfile(LFCliBase): if name in self.created_cx.keys(): expected_passes += 1 if info['urls/s'] * self.requests_per_ten >= self.target_requests_per_ten * .9: - print(name, info['urls/s'], info['urls/s'] * self.requests_per_ten, self.target_requests_per_ten * .9) + # print(name, info['urls/s'], info['urls/s'] * self.requests_per_ten, self.target_requests_per_ten * .9) passes += 1 return passes == expected_passes - def cleanup(self): print("Cleaning up cxs and endpoints") if len(self.created_cx) != 0: @@ -110,7 +138,7 @@ class L4CXProfile(LFCliBase): def create(self, ports=[], sleep_time=.5, debug_=False, suppress_related_commands_=None): cx_post_data = [] for port_name in ports: - print("port_name: {} len: {} self.local_realm.name_to_eid(port_name): {}".format(port_name,len(self.local_realm.name_to_eid(port_name)),self.local_realm.name_to_eid(port_name),)) + print("port_name: {} len: {} self.local_realm.name_to_eid(port_name): {}".format(port_name, len(self.local_realm.name_to_eid(port_name)), self.local_realm.name_to_eid(port_name))) shelf = self.local_realm.name_to_eid(port_name)[0] resource = self.local_realm.name_to_eid(port_name)[1] name = self.local_realm.name_to_eid(port_name)[2] @@ -186,7 +214,6 @@ class L4CXProfile(LFCliBase): print(header_row) # Step 2 - Monitor columns - start_time = datetime.datetime.now() end_time = start_time + datetime.timedelta(seconds=duration_sec) sleep_interval = round(duration_sec // 5) @@ -198,6 +225,9 @@ class L4CXProfile(LFCliBase): passes = 0 expected_passes = 0 timestamps = [] + if self.test_type != 
'urls': + old_rx_values = self.get_bytes() + for test in range(1+iterations): while datetime.datetime.now() < end_time: if col_names is None: @@ -219,16 +249,27 @@ class L4CXProfile(LFCliBase): timestamps.append(t) value_map[t] = response expected_passes += 1 - if self.check_errors(debug): - if self.check_request_rate(): + if self.test_type == 'urls': + if self.check_errors(self.debug): + if self.check_request_rate(): + passes += 1 + else: + self._fail("FAIL: Request rate did not exceed target rate") + break + else: + self._fail("FAIL: Errors found getting to %s " % self.url) + break + + else: + new_rx_values = self.get_bytes() + if self.compare_vals(old_rx_values, new_rx_values): passes += 1 else: - self._fail("FAIL: Request rate did not exceed 90% target rate") - self.exit_fail() - else: - self._fail("FAIL: Errors found getting to %s " % self.url) - self.exit_fail() + self._fail("FAIL: Not all stations increased traffic") + + # self.exit_fail() time.sleep(monitor_interval) + print(value_map) #[further] post-processing data, after test completion diff --git a/py-json/lf_attenmod.py b/py-json/lf_attenmod.py index ce1a33b3..33b7f2b3 100644 --- a/py-json/lf_attenmod.py +++ b/py-json/lf_attenmod.py @@ -57,7 +57,7 @@ class ATTENUATORProfile(LFCliBase): def create(self, debug=False): if len(self.atten_serno) == 0 or len(self.atten_idx) == 0 or len(self.atten_val) == 0: print("ERROR: Must specify atten_serno, atten_idx, and atten_val when setting attenuator.\n") - print("Creating Attenuator...") + print("Setting Attenuator...") self.set_command_param("set_attenuator", "serno", self.atten_serno) self.set_command_param("set_attenuator", "atten_idx", self.atten_idx) self.set_command_param("set_attenuator", "val", self.atten_val) diff --git a/py-json/station_profile.py b/py-json/station_profile.py index 6561e10f..74c649b1 100644 --- a/py-json/station_profile.py +++ b/py-json/station_profile.py @@ -193,6 +193,10 @@ class StationProfile: self.set_command_param("add_sta", 
"ieee80211w", 2) # self.add_sta_data["key"] = passwd + def station_mode_to_number(self,mode): + modes = ['a', 'b', 'g', 'abg', 'an', 'abgn', 'bgn', 'bg', 'abgn-AC', 'bgn-AC', 'an-AC'] + return modes.index(mode) + 1 + def add_security_extra(self, security): types = {"wep": "wep_enable", "wpa": "wpa_enable", "wpa2": "wpa2_enable", "wpa3": "use-wpa3", "open": "[BLANK]"} if self.desired_add_sta_flags.__contains__(types[security]) and \ diff --git a/py-json/test_histogram.py b/py-json/test_histogram.py new file mode 100755 index 00000000..b6bf3cd0 --- /dev/null +++ b/py-json/test_histogram.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +""" ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- + internal test driving LFUtils.expand_endp_histogram +----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- """ +import LANforge +from LANforge import LFUtils +import pprint + +distrib_load = { + "histo_category_width" : 3, + "histogram" : [ + 221, + 113, + 266, + 615, + 16309, + 56853, + 7954, + 1894, + 29246, + 118, + 12, + 2, + 0, + 0, + 0, + 0 + ], + "time window ms" : 300000, + "window avg" : 210.285, + "window max" : 228, + "window min" : 193 +} + +if __name__ == '__main__': + LFUtils.expand_endp_histogram(distrib_load) + + diff --git a/py-scripts/artifacts/candela_swirl_small-72h.png b/py-scripts/artifacts/candela_swirl_small-72h.png new file mode 100644 index 00000000..e288f8c9 Binary files /dev/null and b/py-scripts/artifacts/candela_swirl_small-72h.png differ diff --git a/py-scripts/artifacts/report.css b/py-scripts/artifacts/report.css new file mode 100644 index 00000000..420a5265 --- /dev/null +++ b/py-scripts/artifacts/report.css @@ -0,0 +1,306 @@ +html, body,div { + margin: 0; + padding:0; + font-size: 14px; +} +h1,h2,h3,h4 { + padding: 0em; + line-height: 1.5; + text-align: left; + color: rgb(42,91,41); +} +@font-face { + font-family: CenturyGothic; + src: url("CenturyGothic.woff"), + url("images/CenturyGothic.woff"), + 
url("/images/CenturyGothic.woff"), + url("http://www.candelatech.com/images/CenturyGothic.woff"); +} +body,h1,h2,h3,h4 { + font-family: CenturyGothic, "Century Gothic", Arial, Helvetica, sans-serif; +} +h1 { font-size: 30px;} +h2 { font-size: 24px;} +h3 { font-size: 18px;} +h4 { font-size: 14px;} +li,pre,tt { + text-align: left; +} +pre { + font-size: 10px; +} +table { + border-collapse: collapse; + background: #e0e0e0; +} +table, td, th { + border: 1px solid gray; + padding 4px; +} +table.noborder, table.noborder td, table.noborder th { + border: 0 none; +} +td { + background: white; +} +td.ar { + text-align: right; +} +th { + color: rgb(42,91,41); + text-align: center; +} +#lf_title { + text-align: center; + background-image: url(candela_swirl_small-72h.png); + background-position: right; + background-repeat: no-repeat; + height: 90px; +} +#new_chart { + display: block; + height: 250px; + min-width: 200px; + width: 80%; + border: 1px solid black; + margin: 14px auto; + padding: 14px; + vertical-align: bottom; + text-align: center; +} +.lf_chart { + margin: 1em; + padding: 5px; +} +#error_types ul { + background: #f0f0f0; + font-size: 12px; + line-height: 1.5; + margin: 1em; + padding: 0.25em inherit 0.25em inherit; + max-height: 8em; + overflow: auto; +} +li { + line-height: 1.5; +} +.contentDiv { + min-width: 800px; + max-width: 8in; + margin: 1em auto; + padding: 0; +} +.ct-point { + stroke-width: 6px;} + +.o_el { + display: inline-block; + width: 100px; + height: 230px; + border: none; + margin: 1px 1px 16px 1px; + padding: 10px 10px 0 10px; + background: #eee; + text-align: center; + vertical-align: bottom; +} +.bar_el { + display: block; + background: green; + border: none; + min-height: 1px; + + margin: 0 0 5px 0; + padding: 0; + text-align: center; +} +.label_el { + color: black; + display: block; + font-size: 14px; + font-family: Arial,Helvetica,sans-serif,mono; + margin: 1px; + text-align: center; + vertical-align: bottom; + width: inherit; +} +.value_el 
{ + font-family: Arial,Helvetica,sans-serif,mono; + color: black; + display: block; + font-size: 14px; + margin: 0 auto; + padding: none; + border: none; + background: white; + text-align: center; + vertical-align: bottom; + width: auto; +} +.value_el>span { + background: #f0f0f0a0; + border: 1px solid #f0f0f0a0; + border-radius: 5px; + padding: 1px; + min-width: 2em; +} +.error { + color: red; +} + +@media only screen { +.hideFromPrint { } +.hideFromScreen { display:none; } +} +@media only print { +.hideFromScreen { } +.hideFromPrint { display:none; } +} + +/* these styles will get overridden by custom.css */ +#BannerBack { + background-color: #e68b15; + height: 205px; + max-height: 205px; + border: 0 none; + margin: 0; + padding: 0; + top: 0; + left: 0; + width: 100%; +} +#Banner { + background-image:url("banner.png"); + background-repeat:no-repeat; + padding: 0; + margin: 0 auto; + min-width: 1000px; + min-height: 205px; + width: 1000px; + height: 205px; + max-width: 1000px; + max-height: 205px; +} +#BannerLogo { + text-align: right; + padding: 25px; + margin: 5px; + width: 200px; + border: none; +} +#BannerLogoFooter { + text-align: right; + padding: 1px; + margin: 1px; + width: 200px; + border: none; +} +.TitleFontScreen { + margin-left: auto; + margin-right: auto; + margin-top: 1em; + margin-bottom: 0.2em; + font-size: 50px; + padding-top: 1em; +} + +.TitleFontPrint { + line-height: 1; + margin-left: 0px; + margin-right: auto; + margin-top: 0.5em; + margin-bottom: 0.2em; + padding-top: 20px; + padding-left: 20px; + color: darkgreen; +} + +.TitleFontPrintSub { + line-height: 1; + margin-left: 0px; + margin-right: auto; + margin-top: 0; + margin-bottom: 0; + /*font-size: 20px; Let 'h3', etc control this */ + padding-top: 0px; + padding-left: 20px; +} + +.HeaderFont {} +.TableFont {} +.TableBorder {} +.ImgStyle {} +div.Section h1, div.Section h2 { + margin: 0 0 0 0em; +} +div.HeaderStyle h1, div.HeaderStyle h2 { + text-align: left; + margin: 0 0 0 0; + 
max-width: 8in; + min-width: 800px; +} +div.Section { + padding 5px; + position: relative; +} +div.Section img { + margin: 0; + padding: 0; + position: relative; + top: 50%; + transform: translateY(-50%); +} +div.FooterStyle { + width: 100%; + vertical-align: middle; + border: 0 none; + border-top: 2px solid #2A5B29; + color: #2A5B29; + font-size: 12px; + margin-top: 2em; +} +div.FooterStyle img { + width: auto; + height: auto; + text-align: right; +} +div.FooterStyle span.Gradient { + background: white; + color: #2A5B29; + display: inline-block; + height: 30px; + line-height: 1; + padding-top: 22px; + padding-bottom: 20px; + padding-left: 2em; + vertical-align: middle; + max-width:80%; + float:left; + width:50%; +} +.FooterStyle a, .FooterStyle a:visited { + color: #2A5B29; + font-size: 12px; + line-height: 1; + height: 30px; + margin: 0; + padding: 0; + vertical-align: middle; +} +div.FooterStyle a.LogoImgLink { + display: inline-block; + text-align: right; + float: right; +} +a .LogoImgLink { +} +a.LogoImgLink img { +} + +table.dataframe { + margin: 1em; + padding: 0; +} +table.dataframe tr th { + padding: 0.5em; +} \ No newline at end of file diff --git a/py-scripts/cicd_TipIntegration.py b/py-scripts/cicd_TipIntegration.py index 02651f2c..b246d522 100755 --- a/py-scripts/cicd_TipIntegration.py +++ b/py-scripts/cicd_TipIntegration.py @@ -1,543 +1,543 @@ - -import base64 -import urllib.request -from bs4 import BeautifulSoup -import ssl -import subprocess, os -from artifactory import ArtifactoryPath -import tarfile -import paramiko -from paramiko import SSHClient -from scp import SCPClient -import os -import pexpect -from pexpect import pxssh -import sys -import paramiko -from scp import SCPClient -import pprint -from pprint import pprint -from os import listdir -import re - -# For finding files -# https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory -import glob -external_results_dir=/var/tmp/lanforge - 
-local_dir=os.getenv('LOG_DIR') -print("Local Directory where all files will be copied and logged", local_dir) -cicd_user=os.getenv('CICD_USER') -print("cicd_user = ", cicd_user) -cicd_pw=os.getenv('CICD_PW') -print("cicd pw =",cicd_pw) -ap_pw=os.getenv('AP_PW') -ap_user=os.getenv('AP_USER') -tr_user=os.getenv('TR_USER') -print("Testrail user id = ", tr_user) -tr_pw=os.getenv('TR_PW') -print ("Testrail password =", tr_pw) -aws_host='3.96.56.0' -aws_user='ubuntu' - - - - -if sys.version_info[0] != 3: - print("This script requires Python 3") - exit(1) -if 'py-json' not in sys.path: - sys.path.append('../py-json') - -from LANforge.LFUtils import * -# if you lack __init__.py in this directory you will not find sta_connect module# -import sta_connect -import testrail_api -from sta_connect import StaConnect -from testrail_api import APIClient - -client: APIClient = APIClient('https://telecominfraproject.testrail.com') -client.user = tr_user -client.password = tr_pw - - -print('Beginning file download with requests') - -class GetBuild: - def __init__(self): - self.user = cicd_user - self.password = cicd_pw - ssl._create_default_https_context = ssl._create_unverified_context - - def get_latest_image(self,url): - - auth = str( - base64.b64encode( - bytes('%s:%s' % (cicd_user,cicd_pw ), 'utf-8') - ), - 'ascii' - ).strip() - headers = {'Authorization': 'Basic ' + auth} - - ''' FIND THE LATEST FILE NAME''' - print(url) - req = urllib.request.Request(url, headers=headers) - response = urllib.request.urlopen(req) - html = response.read() - soup = BeautifulSoup(html, features="html.parser") - last_link = soup.find_all('a', href=True)[-1] - latest_file=last_link['href'] - - filepath = local_dir - os.chdir(filepath) - #file_url = url + latest_file - - ''' Download the binary file from Jfrog''' - path = ArtifactoryPath(url,auth=(cicd_user, cicd_pw)) - path.touch() - for file in path: - print('File =', file) - - path = ArtifactoryPath(file, auth=(cicd_user, cicd_pw)) - print("file to 
be downloaded :" ,latest_file) - print("File Path:",file) - with path.open() as des: - with open(latest_file, "wb") as out: - out.write(des.read()) - des.close() - print("Extract the tar.gz file and upgrade the AP ") - housing_tgz = tarfile.open(latest_file) - housing_tgz.extractall() - housing_tgz.close() - return "pass" - print("Extract the tar file, and copying the file to Linksys AP directory") - #with open("/Users/syamadevi/Desktop/syama/ea8300/ap_sysupgrade_output.log", "a") as output: - # subprocess.call("scp /Users/syamadevi/Desktop/syama/ea8300/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin root@192.100.1.1:/tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin",shell=True, stdout=output, - # stderr=output) - - print('SSH to Linksys and upgrade the file') - - ''' - - ssh = SSHClient() - ssh.load_system_host_keys() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.connect(hostname='192.100.1.1', - port='22', - username='root', - password='Dadun123$', - look_for_keys=False, - pkey='load_key_if_relevant') - - # SCPCLient takes a paramiko transport as its only argument - scp = SCPClient(ssh.get_transport()) - - scp.put('test.txt', 'testD.txt') - scp.close() - - - - # client = paramiko.SSHClient() - #client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - #client.connect('192.100.1.1', username='syama', password='Dadun123$') - - stdin, stdout, stderr = ssh.exec_command('sysupgrade /tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin') - - for line in stdout: - print (line.strip('\n')) - client.close() - ''' - - def run_opensyncgw_in_docker(self): - #user_password = 'fepv6nj9guCPeEHC' - #my_env = os.environ.copy() - #my_env["userpass"] = user_password - #my_command = 'python --version' - #subprocess.Popen('echo', env=my_env) - with open(local_dir +"docker_jfrog_login.log", "a") as output: - subprocess.call("docker login --username" + cicd_user + "--password" + cicd_pw + " 
https://tip-tip-wlan-cloud-docker-repo.jfrog.io", shell=True, stdout=output, - stderr=output) - with open(local_dir +"opensyncgw_upgrade.log", "a") as output: - subprocess.call("docker pull tip-tip-wlan-cloud-docker-repo.jfrog.io/opensync-gateway-and-mqtt:0.0.1-SNAPSHOT", shell=True, stdout=output, - stderr=output) - with open(local_dir+"opensyncgw.log", "a") as output: - subprocess.call("docker run --rm -i -p 1883:1883 -p 6640:6640 -p 6643:6643 -p 4043:4043 \ - -v ~/mosquitto/data:/mosquitto/data \ - -v ~/mosquitto/log:/mosquitto/log \ - -v ~/wlan-pki-cert-scripts:/opt/tip-wlan/certs \ - -v ~/app/log:/app/logs \ - -v ~//app/config:/app/config \ - -e OVSDB_CONFIG_FILE='/app/config/config_2_ssids.json' \ - tip-tip-wlan-cloud-docker-repo.jfrog.io/opensync-gateway-and-mqtt:0.0.1-SNAPSHOT",shell=True, stdout=output, - stderr=output) - print("opensync Gateway is running") - return "pass" - - def run_opensyncgw_in_aws(self): - try: - s = pxssh.pxssh() - - os.chdir(local_dir) - print("AWS OPENSYNC GW UPGRADE VIA HELM") - print( - 'Helm upgrades the latest image in the GW if a new image is found from jfrog and the AWS gateway is not upto date ') - # makesure the client key file is in the fame directory to login to AWS VM - s.login(aws_host, aws_user, ssh_key='id_key.pem') - s.sendline('kubectl get pods') - - # run a command - s.prompt() # match the prompt - print(s.before) # print everything before the prompt. - s.sendline( - 'helm upgrade tip-wlan wlan-cloud-helm/tip-wlan/ -n default -f wlan-cloud-helm/tip-wlan/resources/environments/dev-amazon.yaml') - s.prompt() # match the prompt - print(s.before) # print everything before the prompt. - s.sendline('kubectl get pods') - - # run a command - s.prompt() # match the prompt - print(s.before) # print everything before the prompt. - s.logout() - return "pass" - - except pxssh.ExceptionPxssh as e: - print("ALERT !!!!!! 
pxssh failed on login.") - print(e) - - -class openwrt_ap: - - def ap_upgrade(src,user2,host2,tgt,pwd,opts='', timeout=60): - ''' Performs the scp command. Transfers file(s) from local host to remote host ''' - print("AP Model getting upgarded is :", apModel) - if apModel == "ecw5410": - ap_firmware = 'openwrt-ipq806x-generic-edgecore_ecw5410-squashfs-nand-sysupgrade.bin' - AP_IP = '10.10.10.207' - else: - if apModel == "ea8300": - ap_firmware = 'openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin' - AP_IP = '10.10.10.208' - host2 = AP_IP - src = src+ ap_firmware - print("src =", src) - print("AP IP ", AP_IP) - print("AP USER =", ap_user) - print("AP PASSWORD =", ap_pw) - cmd = f'''/bin/bash -c "scp {opts} {src} {user2}@{AP_IP}:{tgt}"''' - print("Executing the following cmd:",cmd,sep='\n') - - tmpFl = '/tmp/scp.log' - fp = open(tmpFl,'wb') - print(tmpFl) - childP = pexpect.spawn(cmd,timeout=timeout) - try: - childP.sendline(cmd) - childP.expect([f"{user2}@{host2}'s password:"]) - childP.sendline(pwd) - childP.logfile = fp - childP.expect(pexpect.EOF) - childP.close() - fp.close() - fp = open(tmpFl,'r') - stdout = fp.read() - fp.close() - - if childP.exitstatus != 0: - raise Exception(stdout) - except KeyboardInterrupt: - childP.close() - fp.close() - return - print(stdout) - - try: - s = pxssh.pxssh() - s.login(host2, user2, pwd) - #s.sendline('sysupgrade /tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin&') - s.sendline('sysupgrade /tmp/openwrt-ipq806x-generic-edgecore_ecw5410-squashfs-nand-sysupgrade.bin&') - #s.logout() - #s.prompt() # match the prompt - print(s.before) # print everything before the prompt. 
- sleep(100) - #s.login(host2, user2, pwd) - s.prompt() - #os.system(f"scp {local_dir}/cacert.pem root@10.10.10.207:/usr/plume/certs/ca.pem") - #os.system(f"scp {local_dir}/clientcert.pem root@10.10.10.207:/usr/plume/certs/client.pem") - #os.system(f"scp {local_dir}/clientkey_dec.pem root@10.10.10.207:/usr/plume/certs/client_dec.key") - #s.sendline('service opensync restart') - #s.prompt() # match the prompt - #print(s.before) # print everything before the prompt. - s.logout() - return "pass" - except pxssh.ExceptionPxssh as e: - print("ALERT !!!!!! pxssh failed on login.") - print(e) - def apCopyCert(src,user2,host2,tgt,pwd,opts='', timeout=60): - - print("Copying the AP Certs") - ''' - s = pxssh.pxssh() - print(src, users2,pwd) - s.login(host2, user2, pwd) - s.prompt() # match the prompt - print("Copying ca.pem") - os.system(f"scp {src}/cacert.pem root@10.10.10.207:/usr/plume/certs/ca.pem") - print("Copying the client.pem") - os.system(f"scp {src}/clientcert.pem root@10.10.10.207:/usr/plume/certs/client.pem") - print("Copying the client_dec.key") - os.system(f"scp {src}/clientkey_dec.pem root@10.10.10.207:/usr/plume/certs/client_dec.key") - s.sendline('service opensync restart') - s.prompt() # match the prompt - print(s.before) # print everything before the prompt. 
- s.logout() - ''' - cacert=src+"ca.pem" - clientcert = src+"client.pem" - clientkey=src+"client_dec.key" - tgt ="/usr/plume/certs" - ap_pw - - print("src =", src) - print("AP IP ", host2) - print("AP USER =", ap_user) - print("AP PASSWORD =", ap_pw) - #cmd = f'''/bin/bash -c "scp {opts} {src} {user2}@{AP_IP}:{tgt}"''' - #cmd = f'''/bin/bash -c "scp {opts} {cacert} {user2}@{AP_IP}:{tgt}"''' - #cmd = f'''/bin/bash -c "scp {opts} {clientcert} {user2}@{AP_IP}:{tgt}"''' - cmd = f'''/bin/bash -c "scp {opts} {cacert} {clientcert} {clientkey} {user2}@{host2}:{tgt}"''' - print("Executing the following cmd:", cmd, sep='\n') - tmpFl = '/tmp/cert.log' - fp = open(tmpFl, 'wb') - print(tmpFl) - childP = pexpect.spawn(cmd, timeout=timeout) - try: - childP.sendline(cmd) - childP.expect([f"{user2}@{host2}'s password:"]) - childP.sendline(ap_pw) - childP.logfile = fp - childP.expect(pexpect.EOF) - fp.close() - fp = open(tmpFl,'r') - stdout = fp.read() - fp.close() - - if childP.exitstatus != 0: - #raise Exception(stdout) - print("there is an excess status non 0") - except KeyboardInterrupt: - childP.close() - fp.close() - return - print(stdout) - def restartGw(src,user2,host2,tgt,pwd,opts='', timeout=60): - print("Restarting opensync GW") - s = pxssh.pxssh() - s.login(host2, user2, pwd) - # s.sendline('sysupgrade /tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin&') - s.sendline('service opensync restart') - # s.logout() - # s.prompt() # match the prompt - print(s.before) # print everything before the prompt. - s.prompt() - s.logout() - - -class RunTest: - def TestCase_938(self, rid): - '''SINGLE CLIENT CONNECTIVITY''' - staConnect = StaConnect("10.10.10.201", 8080, _debugOn=False) - staConnect.sta_mode = 0 - staConnect.upstream_resource = 1 - staConnect.upstream_port = "eth2" - staConnect.radio = "wiphy1" - staConnect.resource = 1 - staConnect.dut_ssid = "autoProvisionedSsid-5u" - #staConnect.dut_passwd = "4C0nnectUS!" 
- staConnect.dut_passwd = "12345678" - staConnect.dut_security = "wpa2" - staConnect.station_names = ["sta01010"] - staConnect.runtime_secs = 30 - staConnect.cleanup_on_exit = True - staConnect.run() - run_results = staConnect.get_result_list() - for result in run_results: - print("test result: " + result) - #result = 'pass' - print("Single Client Connectivity :",staConnect.passes) - if staConnect.passes() == True: - client.update_testrail(case_id=938, run_id=rid, status_id=1, msg='client Connectivity to 5GHZ Open SSID is Passed ') - else: - client.update_testrail(case_id=938, run_id=rid, status_id=5, msg='client connectivity to 5GHZ OPEN SSID is Failed') - - def TestCase_941(self, rid): - #MULTI CLIENT CONNECTIVITY - staConnect = StaConnect("10.10.10.201", 8080, _debugOn=False) - staConnect.sta_mode = 0 - staConnect.upstream_resource = 1 - staConnect.upstream_port = "eth2" - staConnect.radio = "wiphy1" - staConnect.resource = 1 - staConnect.dut_ssid = "autoProvisionedSsid-5u" - # staConnect.dut_passwd = "4C0nnectUS!" - staConnect.dut_passwd = "12345678" - staConnect.dut_security = "wpa2" - staConnect.station_names = ["sta0020", 'sta0021', 'sta0022', 'sta0023'] - staConnect.runtime_secs = 20 - staConnect.cleanup_on_exit = True - staConnect.run() - run_results = staConnect.get_result_list() - for result in run_results: - print("test result: " + result) - if staConnect.passes() == True: - client.update_testrail(case_id=941, run_id=rid, status_id=1, - msg='client Connectivity to 5GHZ Open SSID is Passed ') - else: - client.update_testrail(case_id=941, run_id=rid, status_id=5, - msg='client connectivity to 5GHZ OPEN SSID is Failed') - - # Check for externally run test case results. 
- def TestCase_LF_External(self, rid): - #https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory - results = glob.glob("%s/*_CICD_RESULTS.txt"%external_results_dir) - for r in results: - rfile = open(r, 'r') - lines = rfile.readlines() - - # File contents looks something like: - #CASE_ID 9999 - #RUN_ID 15 - #STATUS 1 - #MSG Test passed nicely - #MSG Build ID: deadbeef - #MSG Results: http://cicd.telecominfraproject.com - - _case_id = -1 - _status_id = 1 # Default to pass - _msg = "" - _rid = rid - - for line in Lines: - m = re.search(r'(\S+) (.*)', line) - k = m.group(0); - v = m.group(1); - - if k == "CASE_ID": - _case_id = v - if k == "RUN_ID": - _rid = v - if k == "STATUS": - _status_id = v - if k == "MSG": - if _msg == "": - _msg == v - else: - _msg += "\n" - _msg += v - if _case_id != -1: - client.update_testrail(case_id=_case_id, run_id=_rid, status_id=_status_id, msg=_msg) - os.unlink(r) - - def TestCase_939(self, rid): - ''' Client Count in MQTT Log''' - try: - print("Counting clients in MQTT") - s = pxssh.pxssh() - #aws_host = os.getenv(AWS_HOST) - #aws_user=os.getenv(AWS_USER) - os.chdir(local_dir) - # makesure the client key file is in the fame directory to login to AWS VM - s.login(aws_host,aws_user,ssh_key='id_key.pem') - s.sendline('kubectl cp tip-wlan-opensync-gw-static-f795d45-ctb5z:/app/logs/mqttData.log mqttData.log') - # run a command - s.prompt() # match the prompt - print(s.before) # print everything before the prompt. 
- s.sendline() - s.logout() - #return "pass" - print(aws_host, aws_user) - ssh = paramiko.SSHClient() - ssh.load_system_host_keys() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - k = paramiko.RSAKey.from_private_key_file('id_key.pem') - ssh.connect(aws_host, username=aws_user, pkey=k) - print("Connected") - scp = SCPClient(ssh.get_transport()) - scp.get("mqttData.log") - scp.close() - # Get the client Count - ClientCount = subprocess.getoutput( - 'grep \'{\"nodeID\"\' mqttData.log | grep clientList | tail -1 |cut -d \'=\' -f 3 | json_pp | grep macAddres | grep \'04:F0:21:55\' | tr -d , | sort | uniq | wc -l ') - print("client count =", ClientCount) - if (int(ClientCount) >= 1): - client.update_testrail(case_id=939, run_id=rid, status_id=1, - msg=ClientCount + ' Client/Clients Connected ') - else: - client.update_testrail(case_id=939, run_id=rid, status_id=5, - msg='No Client Connected') - except pxssh.ExceptionPxssh as e: - print("ALERT !!!!!! pxssh failed on login.") - print(e) - - -params = { - 'src': local_dir, - 'user2': ap_user, - 'host2': '10.10.10.207', - 'tgt': '/tmp/', - 'pwd': ap_pw, - 'opts': '' -} -apModel= "ecw5410" - - -url = 'https://tip.jfrog.io/artifactory/tip-wlan-ap-firmware/' -url = url + apModel -projId = client.get_project_id(project_name= 'WLAN') -print("TIP WLAN Project ID Is :", projId) - -rid = client.get_run_id(test_run_name= 'TIP-DEMO4') -print(rid) -Test: RunTest = RunTest() -Build: GetBuild = GetBuild() -''' -binary_fetch_result = Build.get_latest_image(url) -print("UPDATING TEST RAIL WITH TEST RESULT FOR CASE_ID 940: Download latest openwrt image from Jfrog") - -if binary_fetch_result == 'pass': - client.update_testrail(case_id=940, run_id=rid, status_id=1, msg='latest firmware downloaded') -else: - client.update_testrail(case_id=940, run_id=rid, status_id=5, msg='Firmware Download failed') - -sleep(10) -print("Upgrading AP with latest image downloaded") -ap_upgrade_result = openwrt_ap.ap_upgrade(**params) -sleep(10) 
-print("UPDATING TEST RAIL WITH TEST RESULT FOR CASE_ID 937") -sleep(10) -if ap_upgrade_result == 'pass': - client.update_testrail(case_id=937, run_id=rid, status_id=1, msg='AP upgraded with latest Firmware') -else: - client.update_testrail(case_id=937, run_id=rid, status_id=5, msg='Firmware upgrade failed in AP ') -print("Upgrading AWS Opensync gateway with latest docker image from Jfrog") -OpensyncGw_UpgResult = Build.run_opensyncgw_in_aws() -if OpensyncGw_UpgResult == 'pass': - client.update_testrail(case_id=936, run_id=rid, status_id=1, msg='Opensync GW upgraded with latest Firmware') -else: - client.update_testrail(case_id=936, run_id=rid, status_id=5, msg='Firmware upgrade failed in Opensync Gateway') -sleep(10) -''' -pprint.pprint(params) -ap_cert_result = openwrt_ap.apCopyCert(**params) -print("Executing TestCase 938: single Client Connectivity test") -openwrt_ap.restartGw(**params) -Test.TestCase_938(rid) - -print("Executing TestCase 941: Multi Client Connectivity test") -Test.TestCase_941(rid) -sleep(100) -print("Executing TestCase 939:Counting The number of Clients Connected from MQTT") -Test.TestCase_939(rid) - - - - +# +# import base64 +# import urllib.request +# from bs4 import BeautifulSoup +# import ssl +# import subprocess, os +# from artifactory import ArtifactoryPath +# import tarfile +# import paramiko +# from paramiko import SSHClient +# from scp import SCPClient +# import os +# import pexpect +# from pexpect import pxssh +# import sys +# import paramiko +# from scp import SCPClient +# import pprint +# from pprint import pprint +# from os import listdir +# import re +# +# # For finding files +# # https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory +# import glob +# external_results_dir=/var/tmp/lanforge +# +# local_dir=os.getenv('LOG_DIR') +# print("Local Directory where all files will be copied and logged", local_dir) +# cicd_user=os.getenv('CICD_USER') +# print("cicd_user = ", cicd_user) +# 
cicd_pw=os.getenv('CICD_PW') +# print("cicd pw =",cicd_pw) +# ap_pw=os.getenv('AP_PW') +# ap_user=os.getenv('AP_USER') +# tr_user=os.getenv('TR_USER') +# print("Testrail user id = ", tr_user) +# tr_pw=os.getenv('TR_PW') +# print ("Testrail password =", tr_pw) +# aws_host='3.96.56.0' +# aws_user='ubuntu' +# +# +# +# +# if sys.version_info[0] != 3: +# print("This script requires Python 3") +# exit(1) +# if 'py-json' not in sys.path: +# sys.path.append('../py-json') +# +# from LANforge.LFUtils import * +# # if you lack __init__.py in this directory you will not find sta_connect module# +# import sta_connect +# import testrail_api +# from sta_connect import StaConnect +# from testrail_api import APIClient +# +# client: APIClient = APIClient('https://telecominfraproject.testrail.com') +# client.user = tr_user +# client.password = tr_pw +# +# +# print('Beginning file download with requests') +# +# class GetBuild: +# def __init__(self): +# self.user = cicd_user +# self.password = cicd_pw +# ssl._create_default_https_context = ssl._create_unverified_context +# +# def get_latest_image(self,url): +# +# auth = str( +# base64.b64encode( +# bytes('%s:%s' % (cicd_user,cicd_pw ), 'utf-8') +# ), +# 'ascii' +# ).strip() +# headers = {'Authorization': 'Basic ' + auth} +# +# ''' FIND THE LATEST FILE NAME''' +# print(url) +# req = urllib.request.Request(url, headers=headers) +# response = urllib.request.urlopen(req) +# html = response.read() +# soup = BeautifulSoup(html, features="html.parser") +# last_link = soup.find_all('a', href=True)[-1] +# latest_file=last_link['href'] +# +# filepath = local_dir +# os.chdir(filepath) +# #file_url = url + latest_file +# +# ''' Download the binary file from Jfrog''' +# path = ArtifactoryPath(url,auth=(cicd_user, cicd_pw)) +# path.touch() +# for file in path: +# print('File =', file) +# +# path = ArtifactoryPath(file, auth=(cicd_user, cicd_pw)) +# print("file to be downloaded :" ,latest_file) +# print("File Path:",file) +# with path.open() as des: 
+# with open(latest_file, "wb") as out: +# out.write(des.read()) +# des.close() +# print("Extract the tar.gz file and upgrade the AP ") +# housing_tgz = tarfile.open(latest_file) +# housing_tgz.extractall() +# housing_tgz.close() +# return "pass" +# print("Extract the tar file, and copying the file to Linksys AP directory") +# #with open("/Users/syamadevi/Desktop/syama/ea8300/ap_sysupgrade_output.log", "a") as output: +# # subprocess.call("scp /Users/syamadevi/Desktop/syama/ea8300/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin root@192.100.1.1:/tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin",shell=True, stdout=output, +# # stderr=output) +# +# print('SSH to Linksys and upgrade the file') +# +# ''' +# +# ssh = SSHClient() +# ssh.load_system_host_keys() +# ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) +# ssh.connect(hostname='192.100.1.1', +# port='22', +# username='root', +# password='Dadun123$', +# look_for_keys=False, +# pkey='load_key_if_relevant') +# +# # SCPCLient takes a paramiko transport as its only argument +# scp = SCPClient(ssh.get_transport()) +# +# scp.put('test.txt', 'testD.txt') +# scp.close() +# +# +# +# # client = paramiko.SSHClient() +# #client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) +# #client.connect('192.100.1.1', username='syama', password='Dadun123$') +# +# stdin, stdout, stderr = ssh.exec_command('sysupgrade /tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin') +# +# for line in stdout: +# print (line.strip('\n')) +# client.close() +# ''' +# +# def run_opensyncgw_in_docker(self): +# #user_password = 'fepv6nj9guCPeEHC' +# #my_env = os.environ.copy() +# #my_env["userpass"] = user_password +# #my_command = 'python --version' +# #subprocess.Popen('echo', env=my_env) +# with open(local_dir +"docker_jfrog_login.log", "a") as output: +# subprocess.call("docker login --username" + cicd_user + "--password" + cicd_pw + " https://tip-tip-wlan-cloud-docker-repo.jfrog.io", 
shell=True, stdout=output, +# stderr=output) +# with open(local_dir +"opensyncgw_upgrade.log", "a") as output: +# subprocess.call("docker pull tip-tip-wlan-cloud-docker-repo.jfrog.io/opensync-gateway-and-mqtt:0.0.1-SNAPSHOT", shell=True, stdout=output, +# stderr=output) +# with open(local_dir+"opensyncgw.log", "a") as output: +# subprocess.call("docker run --rm -i -p 1883:1883 -p 6640:6640 -p 6643:6643 -p 4043:4043 \ +# -v ~/mosquitto/data:/mosquitto/data \ +# -v ~/mosquitto/log:/mosquitto/log \ +# -v ~/wlan-pki-cert-scripts:/opt/tip-wlan/certs \ +# -v ~/app/log:/app/logs \ +# -v ~//app/config:/app/config \ +# -e OVSDB_CONFIG_FILE='/app/config/config_2_ssids.json' \ +# tip-tip-wlan-cloud-docker-repo.jfrog.io/opensync-gateway-and-mqtt:0.0.1-SNAPSHOT",shell=True, stdout=output, +# stderr=output) +# print("opensync Gateway is running") +# return "pass" +# +# def run_opensyncgw_in_aws(self): +# try: +# s = pxssh.pxssh() +# +# os.chdir(local_dir) +# print("AWS OPENSYNC GW UPGRADE VIA HELM") +# print( +# 'Helm upgrades the latest image in the GW if a new image is found from jfrog and the AWS gateway is not upto date ') +# # makesure the client key file is in the fame directory to login to AWS VM +# s.login(aws_host, aws_user, ssh_key='id_key.pem') +# s.sendline('kubectl get pods') +# +# # run a command +# s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. +# s.sendline( +# 'helm upgrade tip-wlan wlan-cloud-helm/tip-wlan/ -n default -f wlan-cloud-helm/tip-wlan/resources/environments/dev-amazon.yaml') +# s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. +# s.sendline('kubectl get pods') +# +# # run a command +# s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. +# s.logout() +# return "pass" +# +# except pxssh.ExceptionPxssh as e: +# print("ALERT !!!!!! 
pxssh failed on login.") +# print(e) +# +# +# class openwrt_ap: +# +# def ap_upgrade(src,user2,host2,tgt,pwd,opts='', timeout=60): +# ''' Performs the scp command. Transfers file(s) from local host to remote host ''' +# print("AP Model getting upgarded is :", apModel) +# if apModel == "ecw5410": +# ap_firmware = 'openwrt-ipq806x-generic-edgecore_ecw5410-squashfs-nand-sysupgrade.bin' +# AP_IP = '10.10.10.207' +# else: +# if apModel == "ea8300": +# ap_firmware = 'openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin' +# AP_IP = '10.10.10.208' +# host2 = AP_IP +# src = src+ ap_firmware +# print("src =", src) +# print("AP IP ", AP_IP) +# print("AP USER =", ap_user) +# print("AP PASSWORD =", ap_pw) +# cmd = f'''/bin/bash -c "scp {opts} {src} {user2}@{AP_IP}:{tgt}"''' +# print("Executing the following cmd:",cmd,sep='\n') +# +# tmpFl = '/tmp/scp.log' +# fp = open(tmpFl,'wb') +# print(tmpFl) +# childP = pexpect.spawn(cmd,timeout=timeout) +# try: +# childP.sendline(cmd) +# childP.expect([f"{user2}@{host2}'s password:"]) +# childP.sendline(pwd) +# childP.logfile = fp +# childP.expect(pexpect.EOF) +# childP.close() +# fp.close() +# fp = open(tmpFl,'r') +# stdout = fp.read() +# fp.close() +# +# if childP.exitstatus != 0: +# raise Exception(stdout) +# except KeyboardInterrupt: +# childP.close() +# fp.close() +# return +# print(stdout) +# +# try: +# s = pxssh.pxssh() +# s.login(host2, user2, pwd) +# #s.sendline('sysupgrade /tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin&') +# s.sendline('sysupgrade /tmp/openwrt-ipq806x-generic-edgecore_ecw5410-squashfs-nand-sysupgrade.bin&') +# #s.logout() +# #s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. 
+# sleep(100) +# #s.login(host2, user2, pwd) +# s.prompt() +# #os.system(f"scp {local_dir}/cacert.pem root@10.10.10.207:/usr/plume/certs/ca.pem") +# #os.system(f"scp {local_dir}/clientcert.pem root@10.10.10.207:/usr/plume/certs/client.pem") +# #os.system(f"scp {local_dir}/clientkey_dec.pem root@10.10.10.207:/usr/plume/certs/client_dec.key") +# #s.sendline('service opensync restart') +# #s.prompt() # match the prompt +# #print(s.before) # print everything before the prompt. +# s.logout() +# return "pass" +# except pxssh.ExceptionPxssh as e: +# print("ALERT !!!!!! pxssh failed on login.") +# print(e) +# def apCopyCert(src,user2,host2,tgt,pwd,opts='', timeout=60): +# +# print("Copying the AP Certs") +# ''' +# s = pxssh.pxssh() +# print(src, users2,pwd) +# s.login(host2, user2, pwd) +# s.prompt() # match the prompt +# print("Copying ca.pem") +# os.system(f"scp {src}/cacert.pem root@10.10.10.207:/usr/plume/certs/ca.pem") +# print("Copying the client.pem") +# os.system(f"scp {src}/clientcert.pem root@10.10.10.207:/usr/plume/certs/client.pem") +# print("Copying the client_dec.key") +# os.system(f"scp {src}/clientkey_dec.pem root@10.10.10.207:/usr/plume/certs/client_dec.key") +# s.sendline('service opensync restart') +# s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. 
+# s.logout() +# ''' +# cacert=src+"ca.pem" +# clientcert = src+"client.pem" +# clientkey=src+"client_dec.key" +# tgt ="/usr/plume/certs" +# ap_pw +# +# print("src =", src) +# print("AP IP ", host2) +# print("AP USER =", ap_user) +# print("AP PASSWORD =", ap_pw) +# #cmd = f'''/bin/bash -c "scp {opts} {src} {user2}@{AP_IP}:{tgt}"''' +# #cmd = f'''/bin/bash -c "scp {opts} {cacert} {user2}@{AP_IP}:{tgt}"''' +# #cmd = f'''/bin/bash -c "scp {opts} {clientcert} {user2}@{AP_IP}:{tgt}"''' +# cmd = f'''/bin/bash -c "scp {opts} {cacert} {clientcert} {clientkey} {user2}@{host2}:{tgt}"''' +# print("Executing the following cmd:", cmd, sep='\n') +# tmpFl = '/tmp/cert.log' +# fp = open(tmpFl, 'wb') +# print(tmpFl) +# childP = pexpect.spawn(cmd, timeout=timeout) +# try: +# childP.sendline(cmd) +# childP.expect([f"{user2}@{host2}'s password:"]) +# childP.sendline(ap_pw) +# childP.logfile = fp +# childP.expect(pexpect.EOF) +# fp.close() +# fp = open(tmpFl,'r') +# stdout = fp.read() +# fp.close() +# +# if childP.exitstatus != 0: +# #raise Exception(stdout) +# print("there is an excess status non 0") +# except KeyboardInterrupt: +# childP.close() +# fp.close() +# return +# print(stdout) +# def restartGw(src,user2,host2,tgt,pwd,opts='', timeout=60): +# print("Restarting opensync GW") +# s = pxssh.pxssh() +# s.login(host2, user2, pwd) +# # s.sendline('sysupgrade /tmp/openwrt-ipq40xx-generic-linksys_ea8300-squashfs-sysupgrade.bin&') +# s.sendline('service opensync restart') +# # s.logout() +# # s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. 
+# s.prompt() +# s.logout() +# +# +# class RunTest: +# def TestCase_938(self, rid): +# '''SINGLE CLIENT CONNECTIVITY''' +# staConnect = StaConnect("10.10.10.201", 8080, _debugOn=False) +# staConnect.sta_mode = 0 +# staConnect.upstream_resource = 1 +# staConnect.upstream_port = "eth2" +# staConnect.radio = "wiphy1" +# staConnect.resource = 1 +# staConnect.dut_ssid = "autoProvisionedSsid-5u" +# #staConnect.dut_passwd = "4C0nnectUS!" +# staConnect.dut_passwd = "12345678" +# staConnect.dut_security = "wpa2" +# staConnect.station_names = ["sta01010"] +# staConnect.runtime_secs = 30 +# staConnect.cleanup_on_exit = True +# staConnect.run() +# run_results = staConnect.get_result_list() +# for result in run_results: +# print("test result: " + result) +# #result = 'pass' +# print("Single Client Connectivity :",staConnect.passes) +# if staConnect.passes() == True: +# client.update_testrail(case_id=938, run_id=rid, status_id=1, msg='client Connectivity to 5GHZ Open SSID is Passed ') +# else: +# client.update_testrail(case_id=938, run_id=rid, status_id=5, msg='client connectivity to 5GHZ OPEN SSID is Failed') +# +# def TestCase_941(self, rid): +# #MULTI CLIENT CONNECTIVITY +# staConnect = StaConnect("10.10.10.201", 8080, _debugOn=False) +# staConnect.sta_mode = 0 +# staConnect.upstream_resource = 1 +# staConnect.upstream_port = "eth2" +# staConnect.radio = "wiphy1" +# staConnect.resource = 1 +# staConnect.dut_ssid = "autoProvisionedSsid-5u" +# # staConnect.dut_passwd = "4C0nnectUS!" 
+# staConnect.dut_passwd = "12345678" +# staConnect.dut_security = "wpa2" +# staConnect.station_names = ["sta0020", 'sta0021', 'sta0022', 'sta0023'] +# staConnect.runtime_secs = 20 +# staConnect.cleanup_on_exit = True +# staConnect.run() +# run_results = staConnect.get_result_list() +# for result in run_results: +# print("test result: " + result) +# if staConnect.passes() == True: +# client.update_testrail(case_id=941, run_id=rid, status_id=1, +# msg='client Connectivity to 5GHZ Open SSID is Passed ') +# else: +# client.update_testrail(case_id=941, run_id=rid, status_id=5, +# msg='client connectivity to 5GHZ OPEN SSID is Failed') +# +# # Check for externally run test case results. +# def TestCase_LF_External(self, rid): +# #https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory +# results = glob.glob("%s/*_CICD_RESULTS.txt"%external_results_dir) +# for r in results: +# rfile = open(r, 'r') +# lines = rfile.readlines() +# +# # File contents looks something like: +# #CASE_ID 9999 +# #RUN_ID 15 +# #STATUS 1 +# #MSG Test passed nicely +# #MSG Build ID: deadbeef +# #MSG Results: http://cicd.telecominfraproject.com +# +# _case_id = -1 +# _status_id = 1 # Default to pass +# _msg = "" +# _rid = rid +# +# for line in Lines: +# m = re.search(r'(\S+) (.*)', line) +# k = m.group(0); +# v = m.group(1); +# +# if k == "CASE_ID": +# _case_id = v +# if k == "RUN_ID": +# _rid = v +# if k == "STATUS": +# _status_id = v +# if k == "MSG": +# if _msg == "": +# _msg == v +# else: +# _msg += "\n" +# _msg += v +# if _case_id != -1: +# client.update_testrail(case_id=_case_id, run_id=_rid, status_id=_status_id, msg=_msg) +# os.unlink(r) +# +# def TestCase_939(self, rid): +# ''' Client Count in MQTT Log''' +# try: +# print("Counting clients in MQTT") +# s = pxssh.pxssh() +# #aws_host = os.getenv(AWS_HOST) +# #aws_user=os.getenv(AWS_USER) +# os.chdir(local_dir) +# # makesure the client key file is in the fame directory to login to AWS VM +# 
s.login(aws_host,aws_user,ssh_key='id_key.pem') +# s.sendline('kubectl cp tip-wlan-opensync-gw-static-f795d45-ctb5z:/app/logs/mqttData.log mqttData.log') +# # run a command +# s.prompt() # match the prompt +# print(s.before) # print everything before the prompt. +# s.sendline() +# s.logout() +# #return "pass" +# print(aws_host, aws_user) +# ssh = paramiko.SSHClient() +# ssh.load_system_host_keys() +# ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) +# k = paramiko.RSAKey.from_private_key_file('id_key.pem') +# ssh.connect(aws_host, username=aws_user, pkey=k) +# print("Connected") +# scp = SCPClient(ssh.get_transport()) +# scp.get("mqttData.log") +# scp.close() +# # Get the client Count +# ClientCount = subprocess.getoutput( +# 'grep \'{\"nodeID\"\' mqttData.log | grep clientList | tail -1 |cut -d \'=\' -f 3 | json_pp | grep macAddres | grep \'04:F0:21:55\' | tr -d , | sort | uniq | wc -l ') +# print("client count =", ClientCount) +# if (int(ClientCount) >= 1): +# client.update_testrail(case_id=939, run_id=rid, status_id=1, +# msg=ClientCount + ' Client/Clients Connected ') +# else: +# client.update_testrail(case_id=939, run_id=rid, status_id=5, +# msg='No Client Connected') +# except pxssh.ExceptionPxssh as e: +# print("ALERT !!!!!! 
pxssh failed on login.") +# print(e) +# +# +# params = { +# 'src': local_dir, +# 'user2': ap_user, +# 'host2': '10.10.10.207', +# 'tgt': '/tmp/', +# 'pwd': ap_pw, +# 'opts': '' +# } +# apModel= "ecw5410" +# +# +# url = 'https://tip.jfrog.io/artifactory/tip-wlan-ap-firmware/' +# url = url + apModel +# projId = client.get_project_id(project_name= 'WLAN') +# print("TIP WLAN Project ID Is :", projId) +# +# rid = client.get_run_id(test_run_name= 'TIP-DEMO4') +# print(rid) +# Test: RunTest = RunTest() +# Build: GetBuild = GetBuild() +# ''' +# binary_fetch_result = Build.get_latest_image(url) +# print("UPDATING TEST RAIL WITH TEST RESULT FOR CASE_ID 940: Download latest openwrt image from Jfrog") +# +# if binary_fetch_result == 'pass': +# client.update_testrail(case_id=940, run_id=rid, status_id=1, msg='latest firmware downloaded') +# else: +# client.update_testrail(case_id=940, run_id=rid, status_id=5, msg='Firmware Download failed') +# +# sleep(10) +# print("Upgrading AP with latest image downloaded") +# ap_upgrade_result = openwrt_ap.ap_upgrade(**params) +# sleep(10) +# print("UPDATING TEST RAIL WITH TEST RESULT FOR CASE_ID 937") +# sleep(10) +# if ap_upgrade_result == 'pass': +# client.update_testrail(case_id=937, run_id=rid, status_id=1, msg='AP upgraded with latest Firmware') +# else: +# client.update_testrail(case_id=937, run_id=rid, status_id=5, msg='Firmware upgrade failed in AP ') +# print("Upgrading AWS Opensync gateway with latest docker image from Jfrog") +# OpensyncGw_UpgResult = Build.run_opensyncgw_in_aws() +# if OpensyncGw_UpgResult == 'pass': +# client.update_testrail(case_id=936, run_id=rid, status_id=1, msg='Opensync GW upgraded with latest Firmware') +# else: +# client.update_testrail(case_id=936, run_id=rid, status_id=5, msg='Firmware upgrade failed in Opensync Gateway') +# sleep(10) +# ''' +# pprint.pprint(params) +# ap_cert_result = openwrt_ap.apCopyCert(**params) +# print("Executing TestCase 938: single Client Connectivity test") +# 
openwrt_ap.restartGw(**params) +# Test.TestCase_938(rid) +# +# print("Executing TestCase 941: Multi Client Connectivity test") +# Test.TestCase_941(rid) +# sleep(100) +# print("Executing TestCase 939:Counting The number of Clients Connected from MQTT") +# Test.TestCase_939(rid) +# +# +# +# diff --git a/py-scripts/create_l3.py b/py-scripts/create_l3.py index 98602f34..3b51dbe8 100755 --- a/py-scripts/create_l3.py +++ b/py-scripts/create_l3.py @@ -3,6 +3,8 @@ """ This script will create a variable number of layer3 stations each with their own set of cross-connects and endpoints. + If you want to + Use './create_l3.py --help' to see command line usage and options """ @@ -164,7 +166,7 @@ python3 ./test_ipv4_variable_time.py if (args.num_stations is not None) and (int(args.num_stations) > 0): num_sta = int(args.num_stations) - station_list = LFUtils.portNameSeries(prefix_="sta", start_id_=0, end_id_=num_sta - 1, padding_number_=10000, + station_list = LFUtils.portNameSeries(prefix_="sta", start_id_=int(args.number_template), end_id_=num_sta+int(args.number_template) - 1, padding_number_=10000, radio=args.radio) ip_var_test = CreateL3(host=args.mgr, port=args.mgr_port, diff --git a/py-scripts/create_station.py b/py-scripts/create_station.py index df685f58..f35aa101 100755 --- a/py-scripts/create_station.py +++ b/py-scripts/create_station.py @@ -27,12 +27,14 @@ class CreateStation(Realm): _password=None, _host=None, _port=None, + _mode=0, _sta_list=None, _number_template="00000", _radio="wiphy0", _proxy_str=None, _debug_on=False, _up=True, + _set_txo_data=None, _exit_on_error=False, _exit_on_fail=False): super().__init__(_host, @@ -42,25 +44,26 @@ class CreateStation(Realm): self.ssid = _ssid self.security = _security self.password = _password + self.mode = _mode self.sta_list = _sta_list self.radio = _radio self.timeout = 120 self.number_template = _number_template self.debug = _debug_on self.up = _up + self.set_txo_data = _set_txo_data self.station_profile = 
self.new_station_profile() self.station_profile.lfclient_url = self.lfclient_url self.station_profile.ssid = self.ssid self.station_profile.ssid_pass = self.password, self.station_profile.security = self.security self.station_profile.number_template_ = self.number_template - self.station_profile.mode = 0 + self.station_profile.mode = self.mode if self.debug: print("----- Station List ----- ----- ----- ----- ----- ----- \n") pprint.pprint(self.sta_list) print("---- ~Station List ----- ----- ----- ----- ----- ----- \n") - def build(self): # Build stations self.station_profile.use_security(self.security, self.ssid, self.password) @@ -70,6 +73,15 @@ class CreateStation(Realm): self.station_profile.set_command_flag("add_sta", "create_admin_down", 1) self.station_profile.set_command_param("set_port", "report_timer", 1500) self.station_profile.set_command_flag("set_port", "rpt_timer", 1) + if self.set_txo_data is not None: + self.station_profile.set_wifi_txo(txo_ena=self.set_txo_data["txo_enable"], + tx_power=self.set_txo_data["txpower"], + pream=self.set_txo_data["pream"], + mcs=self.set_txo_data["mcs"], + nss=self.set_txo_data["nss"], + bw=self.set_txo_data["bw"], + retries=self.set_txo_data["retries"], + sgi=self.set_txo_data["sgi"], ) self.station_profile.create(radio=self.radio, sta_names_=self.sta_list, debug=self.debug) if self.up: self.station_profile.admin_up() @@ -78,7 +90,7 @@ class CreateStation(Realm): def main(): - parser = LFCliBase.create_basic_argparse( + parser = LFCliBase.create_basic_argparse( # see create_basic_argparse in ../py-json/LANforge/lfcli_base.py prog='create_station.py', formatter_class=argparse.RawTextHelpFormatter, epilog='''\ @@ -91,6 +103,7 @@ def main(): Command example: ./create_station.py --radio wiphy0 + --start_id 2 --num_stations 3 --security open --ssid netgear @@ -98,14 +111,21 @@ Command example: --debug ''') required = parser.add_argument_group('required arguments') - #required.add_argument('--security', help='WiFi Security 
protocol: < open | wep | wpa | wpa2 | wpa3 >', required=True) + required.add_argument('--start_id', help='--start_id default 0', default=0) + + optional = parser.add_argument_group('Optional arguments') + optional.add_argument('--mode', help='Mode for your station (as a number)',default=0) args = parser.parse_args() - #if args.debug: + # if args.debug: # pprint.pprint(args) # time.sleep(5) if (args.radio is None): - raise ValueError("--radio required") + raise ValueError("--radio required") + + start_id = 0 + if (args.start_id != 0): + start_id = int(args.start_id) num_sta = 2 if (args.num_stations is not None) and (int(args.num_stations) > 0): @@ -113,20 +133,34 @@ Command example: num_sta = num_stations_converted station_list = LFUtils.port_name_series(prefix="sta", - start_id=0, - end_id=num_sta-1, - padding_number=10000, - radio=args.radio) + start_id=start_id, + end_id=start_id + num_sta - 1, + padding_number=10000, + radio=args.radio) + + print("station_list {}".format(station_list)) + set_txo_data={ + "txo_enable": 1, + "txpower": 255, + "pream": 0, + "mcs": 0, + "nss": 0, + "bw": 3, + "retries": 1, + "sgi": 0 + } create_station = CreateStation(_host=args.mgr, - _port=args.mgr_port, - _ssid=args.ssid, - _password=args.passwd, - _security=args.security, - _sta_list=station_list, - _radio=args.radio, - _proxy_str=args.proxy, - _debug_on=args.debug) + _port=args.mgr_port, + _ssid=args.ssid, + _password=args.passwd, + _security=args.security, + _sta_list=station_list, + _mode=args.mode, + _radio=args.radio, + _set_txo_data=None, + _proxy_str=args.proxy, + _debug_on=args.debug) create_station.build() print('Created %s stations' % num_sta) diff --git a/py-scripts/csv_to_influx.py b/py-scripts/csv_to_influx.py index f710356f..8c126b0d 100755 --- a/py-scripts/csv_to_influx.py +++ b/py-scripts/csv_to_influx.py @@ -42,7 +42,7 @@ class CSVtoInflux(): target_csv=None, sep='\t'): self.influxdb = influxdb - self.target_csv = 
target_csv.replace('/home/lanforge/html-reports/', '') + self.target_csv = target_csv self.influx_tag = _influx_tag self.sep = sep @@ -69,7 +69,10 @@ class CSVtoInflux(): tags = dict() print("row: %s" % row) short_description = row[columns['short-description']] - numeric_score = float(row[columns['numeric-score']]) + if row[columns['numeric-score']] == 'NaN': + numeric_score = '0x0' + else: + numeric_score = float(row[columns['numeric-score']]) date = row[columns['Date']] date = datetime.datetime.utcfromtimestamp(int(date) / 1000).isoformat() #convert to datetime so influx can read it, this is required for variable in csv_variables: @@ -146,9 +149,7 @@ python3 csv_to_influx.py --influx_host localhost --influx_org Candela --influx_t args = parser.parse_args() - influxdb = RecordInflux(_lfjson_host=lfjson_host, - _lfjson_port=lfjson_port, - _influx_host=args.influx_host, + influxdb = RecordInflux(_influx_host=args.influx_host, _influx_port=args.influx_port, _influx_org=args.influx_org, _influx_token=args.influx_token, diff --git a/py-scripts/cv_to_grafana.py b/py-scripts/cv_to_grafana.py index df3d2c02..3e1574c5 100755 --- a/py-scripts/cv_to_grafana.py +++ b/py-scripts/cv_to_grafana.py @@ -16,7 +16,6 @@ Influx from this script. --line "Resource=1.1 Profile=default Amount=4 Uses-1=wiphy1 DUT=DUT_TO_GRAFANA_DUT Traffic=wiphy1 Freq=-1" --line "Resource=1.1 Profile=upstream Amount=1 Uses-1=eth1 DUT=DUT_TO_GRAFANA_DUT Traffic=eth1 Freq=-1" --dut DUT_TO_GRAFANA ---test_rig Stidmatt-01 --create_scenario DUT_TO_GRAFANA_SCENARIO --station 1.1.sta00002 --duration 15s @@ -103,7 +102,6 @@ def main(): --line --line --dut - --test_rig --create_scenario --station --influx_tag diff --git a/py-scripts/ghost_profile.py b/py-scripts/ghost_profile.py new file mode 100755 index 00000000..297b4fd6 --- /dev/null +++ b/py-scripts/ghost_profile.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python3 + +""" +NAME: ghost_profile.py +PURPOSE: modify ghost database from the command line. 
+SETUP: A Ghost installation which the user has admin access to. +EXAMPLE: ./ghost_profile.py --article_text_file text.txt --title Test --authors Matthew --ghost_token SECRET_KEY --host 192.168.1.1 + +There is a specific class for uploading kpi graphs called kpi_to_ghost. + +EXAMPLE: ./ghost_profile.py --ghost_token TOKEN --ghost_host 192.168.100.147 +--folders /home/lanforge/html-reports/wifi-capacity-2021-06-04-02-51-07 +--kpi_to_ghost appl --authors Matthew --title 'wifi capacity 2021 06 04 02 51 07' --server 192.168.93.51 +--user_pull lanforge --password_pull lanforge --customer candela --testbed heather --test_run test-run-6 +--user_push matt --password_push PASSWORD + +EXAMPLE 2: ./ghost_profile.py --ghost_token TOKEN +--ghost_host 192.168.100.147 --server 192.168.93.51 --customer candela +--testbed heather --user_push matt --password_push "amount%coverage;Online" --kpi_to_ghost app +--folders /home/lanforge/html-reports/wifi-capacity-2021-06-14-10-42-29 --grafana_token TOKEN +--grafana_host 192.168.100.201 + +this script uses pyjwt. If you get the issue module 'jwt' has no attribute 'encode', run this: pip3 uninstall jwt pyjwt && pip install pyjwt + Matthew Stidham + Copyright 2021 Candela Technologies Inc + License: Free to distribute and modify. LANforge systems must be licensed. 
+""" +import sys +import os +import argparse + +if sys.version_info[0] != 3: + print("This script requires Python 3") + exit(1) + +if 'py-json' not in sys.path: + sys.path.append(os.path.join(os.path.abspath('..'), 'py-json')) + sys.path.append(os.path.join(os.path.abspath('..'), 'py-dashboard')) + +from GhostRequest import GhostRequest + + +class UseGhost(GhostRequest): + def __init__(self, + _ghost_token=None, + host="localhost", + port=8080, + _debug_on=False, + _exit_on_fail=False, + _ghost_host="localhost", + _ghost_port=2368, + influx_host=None, + influx_port=None, + influx_org=None, + influx_token=None, + influx_bucket=None): + super().__init__(_ghost_host, + str(_ghost_port), + _api_token=_ghost_token, + influx_host=influx_host, + influx_port=influx_port, + influx_org=influx_org, + influx_token=influx_token, + influx_bucket=influx_bucket, + debug_=_debug_on) + self.ghost_host = _ghost_host + self.ghost_port = _ghost_port + self.ghost_token = _ghost_token + self.influx_host = influx_host + self.influx_port = influx_port + self.influx_org = influx_org + self.influx_token = influx_token + self.influx_bucket = influx_bucket + + def create_post_from_file(self, title, file, tags, authors): + text = open(file).read() + return self.create_post(title=title, text=text, tags=tags, authors=authors) + + def kpi(self, + authors, + folders, + parent_folder, + title, + server_pull, + ghost_host, + port, + user_push, + password_push, + customer, + testbed, + test_run, + grafana_token, + grafana_host, + grafana_port, + datasource, + grafana_bucket): + target_folders = list() + return self.kpi_to_ghost(authors, + folders, + parent_folder, + title, + server_pull, + ghost_host, + port, + user_push, + password_push, + customer, + testbed, + test_run, + target_folders, + grafana_token, + grafana_host, + grafana_port, + datasource, + grafana_bucket) + + +def main(): + parser = argparse.ArgumentParser( + prog='ghost_profile.py', + formatter_class=argparse.RawTextHelpFormatter, + 
epilog='''Manage Ghost Website''', + description=''' + ghost_profile.py + ---------------- + Command example: + ./ghost_profile.py + --ghost_token''' + ) + optional = parser.add_argument_group('optional arguments') + optional.add_argument('--ghost_token', default=None) + optional.add_argument('--create_post', default=None) + optional.add_argument('--article_text_file', default=None) + + optional.add_argument('--ghost_port', help='Ghost port if different from 2368', default=2368) + optional.add_argument('--ghost_host', help='Ghost host if different from localhost', default='localhost') + optional.add_argument('--article_text') + optional.add_argument('--article_tags', action='append') + optional.add_argument('--authors', action='append') + optional.add_argument('--title', default=None) + optional.add_argument('--image', default=None) + optional.add_argument('--folder', default=None) + optional.add_argument('--custom_post', default=None) + optional.add_argument('--kpi_to_ghost', help='Generate a Ghost report from KPI spreadsheets', action="store_true") + optional.add_argument('--folders', action='append', default=None) + optional.add_argument('--server_pull') + optional.add_argument('--port', default=22) + optional.add_argument('--user_push') + optional.add_argument('--password_push') + optional.add_argument('--customer') + optional.add_argument('--testbed') + optional.add_argument('--test_run', default=None) + optional.add_argument('--grafana_token', default=None) + optional.add_argument('--grafana_host', default=None) + optional.add_argument('--grafana_port', default=3000) + optional.add_argument('--parent_folder', default=None) + optional.add_argument('--datasource', default='InfluxDB') + optional.add_argument('--grafana_bucket', default=None) + optional.add_argument('--influx_host') + optional.add_argument('--influx_token', help='Username for your Influx database') + optional.add_argument('--influx_bucket', help='Password for your Influx database') + 
optional.add_argument('--influx_org', help='Name of your Influx database') + optional.add_argument('--influx_port', help='Port where your influx database is located', default=8086) + optional.add_argument('--influx_tag', action='append', nargs=2, + help='--influx_tag Can add more than one of these.') + optional.add_argument('--influx_mgr', + help='IP address of the server your Influx database is hosted if different from your LANforge Manager', + default=None) + optional.add_argument('--debug', help='Enable debugging', default=False, action="store_true") + args = parser.parse_args() + + Ghost = UseGhost(_ghost_token=args.ghost_token, + _ghost_port=args.ghost_port, + _ghost_host=args.ghost_host, + influx_host=args.influx_host, + influx_port=args.influx_port, + influx_org=args.influx_org, + influx_token=args.influx_token, + influx_bucket=args.influx_bucket, + _debug_on=args.debug) + + if args.create_post is not None: + Ghost.create_post(args.title, args.article_text, args.article_tags, args.authors) + if args.article_text_file is not None: + Ghost.create_post_from_file(args.title, args.article_text_file, args.article_tags, args.authors) + + if args.image is not None: + Ghost.upload_image(args.image) + + if args.custom_post is not None: + if args.folders is not None: + Ghost.custom_post(args.folders, args.authors) + else: + Ghost.custom_post(args.folder, args.authors) + else: + if args.folder is not None: + Ghost.upload_images(args.folder) + + if args.kpi_to_ghost is True: + Ghost.kpi(args.authors, + args.folders, + args.parent_folder, + args.title, + args.server_pull, + args.ghost_host, + args.port, + args.user_push, + args.password_push, + args.customer, + args.testbed, + args.test_run, + args.grafana_token, + args.grafana_host, + args.grafana_port, + args.datasource, + args.grafana_bucket) + + +if __name__ == "__main__": + main() diff --git a/py-scripts/grafana_profile.py b/py-scripts/grafana_profile.py index 06c15a07..73ace537 100755 --- 
a/py-scripts/grafana_profile.py +++ b/py-scripts/grafana_profile.py @@ -19,8 +19,8 @@ if 'py-json' not in sys.path: from GrafanaRequest import GrafanaRequest from LANforge.lfcli_base import LFCliBase -import json import string +<<<<<<< HEAD import random @@ -161,109 +161,11 @@ class UseGrafana(LFCliBase): options = dict() options['alertThreshold'] = True +======= - groupBy = list() - groupBy.append(self.groupby('$__interval', 'time')) - groupBy.append(self.groupby('null', 'fill')) +class UseGrafana(GrafanaRequest): +>>>>>>> 0ef021e1165cbaa612e5128bc48d6abfbb7b887b - targets = list() - counter = 0 - new_target = self.maketargets(bucket, scriptname, groupBy, counter, graph_group,testbed) - targets.append(new_target) - - fieldConfig = dict() - fieldConfig['defaults'] = dict() - fieldConfig['overrides'] = list() - - transformation = dict() - transformation['id'] = "renameByRegex" - transformation_options = dict() - transformation_options['regex'] = "(.*) value.*" - transformation_options['renamePattern'] = "$1" - transformation['options'] = transformation_options - - xaxis = dict() - xaxis['buckets'] = None - xaxis['mode'] = "time" - xaxis['name'] = None - xaxis['show'] = True - xaxis['values'] = list() - - yaxis = dict() - yaxis['format'] = 'short' - yaxis['label'] = unit_dict[graph_group] - yaxis['logBase'] = 1 - yaxis['max'] = None - yaxis['min'] = None - yaxis['show'] = True - - yaxis1 = dict() - yaxis1['align'] = False - yaxis1['alignLevel'] = None - - panel['aliasColors'] = dict() - panel['bars'] = False - panel['dashes'] = False - panel['dashLength'] = 10 - panel['datasource'] = datasource - panel['fieldConfig'] = fieldConfig - panel['fill'] = 0 - panel['fillGradient'] = 0 - panel['gridPos'] = gridpos - panel['hiddenSeries'] = False - panel['id'] = index - panel['legend'] = legend - panel['lines'] = True - panel['linewidth'] = 1 - panel['nullPointMode'] = 'null' - panel['options'] = options - panel['percentage'] = False - panel['pluginVersion'] = '7.5.4' - 
panel['pointradius'] = 2 - panel['points'] = True - panel['renderer'] = 'flot' - panel['seriesOverrides'] = list() - panel['spaceLength'] = 10 - panel['stack'] = False - panel['steppedLine'] = False - panel['targets'] = targets - panel['thresholds'] = list() - panel['timeFrom'] = None - panel['timeRegions'] = list() - panel['timeShift'] = None - if graph_group is not None: - panel['title'] = scriptname + ' ' + graph_group - else: - panel['title'] = scriptname - panel['transformations'] = list() - panel['transformations'].append(transformation) - panel['type'] = "graph" - panel['xaxis'] = xaxis - panel['yaxes'] = list() - panel['yaxes'].append(yaxis) - panel['yaxes'].append(yaxis) - panel['yaxis'] = yaxis1 - - panels.append(panel) - index = index + 1 - input1['annotations'] = annot - input1['editable'] = True - input1['gnetId'] = None - input1['graphTooltip'] = 0 - input1['links'] = list() - input1['panels'] = panels - input1['refresh'] = False - input1['schemaVersion'] = 27 - input1['style'] = 'dark' - input1['tags'] = list() - input1['templating'] = templating - input1['time'] = timedict - input1['timepicker'] = dict() - input1['timezone'] = '' - input1['title'] = ("Testbed: %s" % title) - input1['uid'] = uid - input1['version'] = 11 - return self.GR.create_dashboard_from_dict(dictionary=json.dumps(input1)) def read_csv(self, file): csv = open(file).read().split('\n') @@ -280,19 +182,6 @@ class UseGrafana(LFCliBase): results.append(row[value]) return results - def get_graph_groups(self,target_csvs): # Get the unique values in the Graph-Group column - dictionary = dict() - for target_csv in target_csvs: - if len(target_csv) > 1: - csv = self.read_csv(target_csv) - # Unique values in the test-id column - scripts = list(set(self.get_values(csv,'test-id'))) - # we need to make sure we match each Graph Group to the script it occurs in - for script in scripts: - # Unique Graph Groups for each script - dictionary[script] = list(set(self.get_values(csv,'Graph-Group'))) - 
print(dictionary) - return dictionary def get_units(self, target_csv): csv = self.read_csv(target_csv) @@ -324,6 +213,12 @@ def main(): --graph_groups 'Per Stations Rate DL' --graph_groups 'Per Stations Rate UL' --graph_groups 'Per Stations Rate UL+DL' + + Create a snapshot of a dashboard: + ./grafana_profile.py --grafana_token TOKEN + --grafana_host HOST + --create_snapshot + --title TITLE_OF_DASHBOARD ''') required = parser.add_argument_group('required arguments') required.add_argument('--grafana_token', help='token to access your Grafana database', required=True) diff --git a/py-scripts/influx2.py b/py-scripts/influx2.py index 0e561945..48ead535 100755 --- a/py-scripts/influx2.py +++ b/py-scripts/influx2.py @@ -21,13 +21,11 @@ import json import influxdb_client from influxdb_client.client.write_api import SYNCHRONOUS import datetime -from LANforge.lfcli_base import LFCliBase +#from LANforge.lfcli_base import LFCliBase import time -class RecordInflux(LFCliBase): +class RecordInflux: def __init__(self, - _lfjson_host="lanforge", - _lfjson_port=8080, _influx_host="localhost", _influx_port=8086, _influx_org=None, @@ -35,9 +33,6 @@ class RecordInflux(LFCliBase): _influx_bucket=None, _debug_on=False, _exit_on_fail=False): - super().__init__(_lfjson_host, _lfjson_port, - _debug=_debug_on, - _exit_on_fail=_exit_on_fail) self.influx_host = _influx_host self.influx_port = _influx_port self.influx_org = _influx_org @@ -49,10 +44,6 @@ class RecordInflux(LFCliBase): org=self.influx_org, debug=_debug_on) self.write_api = self.client.write_api(write_options=SYNCHRONOUS) - #print("org: ", self.influx_org) - #print("token: ", self.influx_token) - #print("bucket: ", self.influx_bucket) - #exit(0) def post_to_influx(self, key, value, tags, time): p = influxdb_client.Point(key) diff --git a/py-scripts/lf_ap_auto_test.py b/py-scripts/lf_ap_auto_test.py index d77cc3d2..573e44a0 100755 --- a/py-scripts/lf_ap_auto_test.py +++ b/py-scripts/lf_ap_auto_test.py @@ -23,7 +23,7 @@ the 
options and how best to input data. --set 'Skip 2.4Ghz Tests' 1 --set 'Skip 5Ghz Tests' 1 \ --set 'Throughput vs Pkt Size' 0 --set 'Capacity' 0 --set 'Stability' 0 --set 'Band-Steering' 0 \ --set 'Multi-Station Throughput vs Pkt Size' 0 --set 'Long-Term' 0 \ - --test_rig Testbed-01 --pull_report \ + --pull_report \ --influx_host c7-graphana --influx_port 8086 --influx_org Candela \ --influx_token=-u_Wd-L8o992701QF0c5UmqEp7w7Z7YOMaWLxOMgmHfATJGnQbbmYyNxHBR9PgD6taM_tcxqJl6U8DjU1xINFQ== \ --influx_bucket ben \ @@ -46,7 +46,6 @@ show_log: 0 port_sorting: 0 kpi_id: AP Auto bg: 0xE0ECF8 -test_rig: Ferndale-01-Basic show_scan: 1 auto_helper: 1 skip_2: 1 @@ -187,6 +186,7 @@ class ApAutoTest(cvtest): lf_port=8080, lf_user="lanforge", lf_password="lanforge", + local_lf_report_dir="", instance_name="ap_auto_instance", config_name="ap_auto_config", upstream="1.1.eth1", @@ -231,6 +231,7 @@ class ApAutoTest(cvtest): self.raw_lines_file = raw_lines_file self.sets = sets self.graph_groups = graph_groups + self.local_lf_report_dir = local_lf_report_dir def setup(self): # Nothing to do at this time. @@ -283,7 +284,7 @@ class ApAutoTest(cvtest): self.create_and_run_test(self.load_old_cfg, self.test_name, self.instance_name, self.config_name, self.sets, self.pull_report, self.lf_host, self.lf_user, self.lf_password, - cv_cmds, graph_groups_file=self.graph_groups) + cv_cmds, graph_groups_file=self.graph_groups, local_lf_report_dir=self.local_lf_report_dir) self.rm_text_blob(self.config_name, blob_test) # To delete old config with same name @@ -333,6 +334,7 @@ def main(): help="Specify 2.4Ghz radio. May be specified multiple times.") parser.add_argument("--radio5", action='append', nargs=1, default=[], help="Specify 5Ghz radio. 
May be specified multiple times.") + parser.add_argument("--local_lf_report_dir", help="--local_lf_report_dir default '' put where dataplane script run from",default="") args = parser.parse_args() @@ -346,6 +348,7 @@ def main(): config_name = args.config_name, upstream = args.upstream, pull_report = args.pull_report, + local_lf_report_dir = args.local_lf_report_dir, dut5_0 = args.dut5_0, dut2_0 = args.dut2_0, load_old_cfg = args.load_old_cfg, diff --git a/py-scripts/lf_csv.py b/py-scripts/lf_csv.py new file mode 100644 index 00000000..a87fb038 --- /dev/null +++ b/py-scripts/lf_csv.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +''' +NAME: lf_csv.py + +PURPOSE: +Common Library for generating csv for LANforge output + +SETUP: +/lanforge/html-reports directory needs to be present or output generated in local file + +EXAMPLE: +see: /py-scritps/lf_report_test.py for example + +COPYWRITE + Copyright 2021 Candela Technologies Inc + License: Free to distribute and modify. LANforge systems must be licensed. 
+ +INCLUDE_IN_README +''' + +import numpy as np +import pandas as pd + + +class LfCSV: + def __init__(self, + _columns=['Stations', 'bk', 'be', 'vi', 'vo'], + _rows=[['sta0001', 'sta0002', 'sta0003', 'sta0004', 'sta0005'], + [1, 2, 3, 4, 5], + [11, 22, 33, 44, 55], + [6, 7, 8, 9, 10], + [66, 77, 88, 99, 100]], + _filename='test.csv'): + self.rows = _rows + self.columns = _columns + self.filename = _filename + + def generate_csv(self): + df = {} + for i in range(len(self.columns)): + df[self.columns[i]] = self.rows[i] + csv_df = pd.DataFrame(df) + print(csv_df) + csv_df.to_csv(self.filename, index=False, encoding='utf-8', na_rep='NA', float_format='%.2f') + + +if __name__ == "__main__": + test = LfCSV() + test.generate_csv() diff --git a/py-scripts/lf_dataplane_test.py b/py-scripts/lf_dataplane_test.py index 17939f5b..98682f7a 100755 --- a/py-scripts/lf_dataplane_test.py +++ b/py-scripts/lf_dataplane_test.py @@ -121,7 +121,7 @@ class DataplaneTest(cv_test): lf_user="lanforge", lf_password="lanforge", ssh_port=22, - local_path="", + local_lf_report_dir="", instance_name="dpt_instance", config_name="dpt_config", upstream="1.1.eth2", @@ -138,7 +138,9 @@ class DataplaneTest(cv_test): raw_lines_file="", sets=[], graph_groups=None, - report_dir="" + report_dir="", + test_rig="", + debug=False ): super().__init__(lfclient_host=lf_host, lfclient_port=lf_port) @@ -165,7 +167,9 @@ class DataplaneTest(cv_test): self.graph_groups = graph_groups self.report_dir = report_dir self.ssh_port = ssh_port - self.local_path = local_path + self.local_lf_report_dir = local_lf_report_dir + self.test_rig = test_rig + self.debug = debug def setup(self): # Nothing to do at this time. @@ -200,6 +204,8 @@ class DataplaneTest(cv_test): cfg_options.append("duration: " + self.duration) if self.dut != "": cfg_options.append("selected_dut: " + self.dut) + if self.test_rig != "": + cfg_options.append("test_rig: " + self.test_rig) # We deleted the scenario earlier, now re-build new one line at a time. 
@@ -209,8 +215,8 @@ class DataplaneTest(cv_test): self.create_and_run_test(self.load_old_cfg, self.test_name, self.instance_name, self.config_name, self.sets, self.pull_report, self.lf_host, self.lf_user, self.lf_password, - cv_cmds, ssh_port=self.ssh_port, local_path=self.local_path, - graph_groups_file=self.graph_groups) + cv_cmds, ssh_port=self.ssh_port, local_lf_report_dir=self.local_lf_report_dir, + graph_groups_file=self.graph_groups, debug=self.debug) self.rm_text_blob(self.config_name, blob_test) # To delete old config with same name @@ -239,6 +245,11 @@ def main(): cv_add_base_parser(parser) # see cv_test_manager.py +<<<<<<< HEAD +======= + parser.add_argument('--json', help="--json json input file", default="") + parser.add_argument('--influx_json', help="--influx_json influx config json input file", default="") +>>>>>>> 0ef021e1165cbaa612e5128bc48d6abfbb7b887b parser.add_argument("-u", "--upstream", type=str, default="", help="Upstream port for wifi capacity test ex. 1.1.eth2") parser.add_argument("--station", type=str, default="", @@ -254,9 +265,76 @@ def main(): help="Specify duration of each traffic run") parser.add_argument("--graph_groups", help="File to save graph_groups to", default=None) parser.add_argument("--report_dir", default="") + parser.add_argument("--local_lf_report_dir", help="--local_lf_report_dir default '' put where dataplane script run from",default="") + parser.add_argument("--debug", default=False) + args = parser.parse_args() +<<<<<<< HEAD +======= + # use json config file + if args.json != "": + try: + with open(args.json, 'r') as json_config: + json_data = json.load(json_config) + except: + print("Error reading {}".format(args.json)) + # json configuation takes presidence to command line + if "mgr" in json_data: + args.mgr = json_data["mgr"] + if "port" in json_data: + args.port = json_data["port"] + if "lf_user" in json_data: + args.lf_user = json_data["lf_user"] + if "lf_password" in json_data: + args.lf_password = 
json_data["lf_password"] + if "instance_name" in json_data: + args.instance_name = json_data["instance_name"] + if "config_name" in json_data: + args.config_name = json_data["config_name"] + if "upstream" in json_data: + args.upstream = json_data["upstream"] + if "dut" in json_data: + args.dut = json_data["dut"] + if "duration" in json_data: + args.duration = json_data["duration"] + if "station" in json_data: + args.station = json_data["station"] + if "download_speed" in json_data: + args.download_speed = json_data["download_speed"] + if "upload_speed" in json_data: + args.upload_speed = json_data["upload_speed"] + if "pull_report" in json_data: + args.pull_report = json_data["pull_report"] + if "raw_line" in json_data: + # the json_data is a list , need to make into a list of lists, to match command line raw_line paramaters + # https://www.tutorialspoint.com/convert-list-into-list-of-lists-in-python + json_data_tmp = [[x] for x in json_data["raw_line"]] + args.raw_line = json_data_tmp + + + # use influx json config file + if args.influx_json != "": + try: + with open(args.influx_json, 'r') as influx_json_config: + influx_json_data = json.load(influx_json_config) + except: + print("Error reading {}".format(args.influx_json)) + # json configuation takes presidence to command line + # influx DB configuration + if "influx_host" in influx_json_data: + args.influx_host = influx_json_data["influx_host"] + if "influx_port" in influx_json_data: + args.influx_port = influx_json_data["influx_port"] + if "influx_org" in influx_json_data: + args.influx_org = influx_json_data["influx_org"] + if "influx_token" in influx_json_data: + args.influx_token = influx_json_data["influx_token"] + if "influx_bucket" in influx_json_data: + args.influx_bucket = influx_json_data["influx_bucket"] + +>>>>>>> 0ef021e1165cbaa612e5128bc48d6abfbb7b887b cv_base_adjust_parser(args) CV_Test = DataplaneTest(lf_host = args.mgr, @@ -267,6 +345,7 @@ def main(): config_name = args.config_name, upstream = 
args.upstream, pull_report = args.pull_report, + local_lf_report_dir = args.local_lf_report_dir, load_old_cfg = args.load_old_cfg, download_speed = args.download_speed, upload_speed = args.upload_speed, @@ -278,7 +357,9 @@ def main(): raw_lines = args.raw_line, raw_lines_file = args.raw_lines_file, sets = args.set, - graph_groups = args.graph_groups + graph_groups = args.graph_groups, + test_rig=args.test_rig, + debug=args.debug ) CV_Test.setup() CV_Test.run() diff --git a/py-scripts/lf_graph.py b/py-scripts/lf_graph.py index df109b65..fa82d944 100755 --- a/py-scripts/lf_graph.py +++ b/py-scripts/lf_graph.py @@ -25,16 +25,20 @@ import pandas as pd import pdfkit import math from matplotlib.colors import ListedColormap +from lf_csv import LfCSV # internal candela references included during intial phases, to be deleted at future date # graph reporting classes class lf_bar_graph(): - def __init__(self, _data_set=[[30, 55, 69, 37], [45, 67, 34, 22], [22, 45, 12, 34]], + def __init__(self, _data_set=[[30.4, 55.3, 69.2, 37.1], [45.1, 67.2, 34.3, 22.4], [22.5, 45.6, 12.7, 34.8]], _xaxis_name="x-axis", _yaxis_name="y-axis", - _xaxis_categories=[1, 2, 3, 4], + _xaxis_categories=[1, 2, 3, 4, 5], + _xaxis_label=["a", "b", "c", "d", "e"], + _graph_title="", + _title_size=16, _graph_image_name="image_name", _label=["bi-downlink", "bi-uplink", 'uplink'], _color=None, @@ -43,12 +47,22 @@ class lf_bar_graph(): _font_weight='bold', _color_name=['lightcoral', 'darkgrey', 'r', 'g', 'b', 'y'], _figsize=(10, 5), - _dpi=96): + _show_bar_value=False, + _xaxis_step=5, + _xticks_font = None, + _text_font=None, + _text_rotation=None, + _grp_title = "", + _dpi=96, + _enable_csv=False): self.data_set = _data_set self.xaxis_name = _xaxis_name self.yaxis_name = _yaxis_name self.xaxis_categories = _xaxis_categories + self.xaxis_label = _xaxis_label + self.title = _graph_title + self.title_size = _title_size self.graph_image_name = _graph_image_name self.label = _label self.color = _color @@ -57,6 
+71,14 @@ class lf_bar_graph(): self.font_weight = _font_weight self.color_name = _color_name self.figsize = _figsize + self.show_bar_value = _show_bar_value + self.xaxis_step = _xaxis_step + self.xticks_font = _xticks_font + self.text_font = _text_font + self.text_rotation = _text_rotation + self.grp_title = _grp_title + self.enable_csv = _enable_csv + self.lf_csv = LfCSV() def build_bar_graph(self): if self.color is None: @@ -68,31 +90,53 @@ class lf_bar_graph(): fig = plt.subplots(figsize=self.figsize) i = 0 + + def show_value(rects): + for rect in rects: + h = rect.get_height() + plt.text(rect.get_x() + rect.get_width() / 2., h, h, + ha='center', va='bottom', rotation=self.text_rotation, fontsize=self.text_font) + for data in self.data_set: if i > 0: br = br1 br2 = [x + self.bar_width for x in br] - plt.bar(br2, self.data_set[i], color=self.color[i], width=self.bar_width, - edgecolor=self.color_edge, label=self.label[i]) + rects = plt.bar(br2, self.data_set[i], color=self.color[i], width=self.bar_width, + edgecolor=self.color_edge, label=self.label[i]) + if self.show_bar_value: + show_value(rects) br1 = br2 i = i + 1 else: br1 = np.arange(len(self.data_set[i])) - plt.bar(br1, self.data_set[i], color=self.color[i], width=self.bar_width, - edgecolor=self.color_edge, label=self.label[i]) + rects = plt.bar(br1, self.data_set[i], color=self.color[i], width=self.bar_width, + edgecolor=self.color_edge, label=self.label[i]) + if self.show_bar_value: + show_value(rects) i = i + 1 plt.xlabel(self.xaxis_name, fontweight='bold', fontsize=15) plt.ylabel(self.yaxis_name, fontweight='bold', fontsize=15) - """plt.xticks([r + self.bar_width for r in range(len(self.data_set[0]))], - self.xaxis_categories)""" - plt.xticks(np.arange(0, len(self.xaxis_categories), step=5)) + if self.xaxis_categories[0] == 0: + plt.xticks(np.arange(0, len(self.xaxis_categories), step=self.xaxis_step),fontsize = self.xticks_font) + else: + plt.xticks(np.arange(0, len(self.data_set[0]), 
step=self.xaxis_step), self.xaxis_categories, + fontsize = self.xticks_font) plt.legend() - + plt.suptitle(self.title, fontsize=self.title_size) + plt.title(self.grp_title) fig = plt.gcf() plt.savefig("%s.png" % self.graph_image_name, dpi=96) plt.close() print("{}.png".format(self.graph_image_name)) - + if self.enable_csv: + if self.data_set is not None: + self.lf_csv.columns = self.label + self.lf_csv.rows = self.data_set + self.lf_csv.filename = f"{self.graph_image_name}.csv" + self.lf_csv.generate_csv() + else: + print("No Dataset Found") + print("{}.csv".format(self.graph_image_name)) return "%s.png" % self.graph_image_name @@ -104,9 +148,10 @@ class lf_scatter_graph(): _xaxis_name="x-axis", _yaxis_name="y-axis", _label=["num1", "num2"], - _graph_image_name="image_name", + _graph_image_name="image_name1", _color=["r", "y"], - _figsize=(9, 4)): + _figsize=(9, 4), + _enable_csv=True): self.x_data_set = _x_data_set self.y_data_set = _y_data_set self.xaxis_name = _xaxis_name @@ -116,6 +161,8 @@ class lf_scatter_graph(): self.color = _color self.label = _label self.values = _values + self.enable_csv = _enable_csv + self.lf_csv = LfCSV() def build_scatter_graph(self): if self.color is None: @@ -140,6 +187,11 @@ class lf_scatter_graph(): plt.savefig("%s.png" % self.graph_image_name, dpi=96) plt.close() print("{}.png".format(self.graph_image_name)) + if self.enable_csv: + self.lf_csv.columns = self.label + self.lf_csv.rows = self.y_data_set + self.lf_csv.filename = f"{self.graph_image_name}.csv" + self.lf_csv.generate_csv() return "%s.png" % self.graph_image_name @@ -150,9 +202,10 @@ class lf_stacked_graph(): _xaxis_name="Stations", _yaxis_name="Numbers", _label=['Success', 'Fail'], - _graph_image_name="image_name", + _graph_image_name="image_name2", _color=["b", "g"], - _figsize=(9, 4)): + _figsize=(9, 4), + _enable_csv=True): self.data_set = _data_set # [x_axis,y1_axis,y2_axis] self.xaxis_name = _xaxis_name self.yaxis_name = _yaxis_name @@ -160,6 +213,8 @@ class 
lf_stacked_graph(): self.graph_image_name = _graph_image_name self.label = _label self.color = _color + self.enable_csv = _enable_csv + self.lf_csv = LfCSV() def build_stacked_graph(self): fig = plt.subplots(figsize=self.figsize) @@ -177,7 +232,11 @@ class lf_stacked_graph(): plt.savefig("%s.png" % (self.graph_image_name), dpi=96) plt.close() print("{}.png".format(self.graph_image_name)) - + if self.enable_csv: + self.lf_csv.columns = self.label + self.lf_csv.rows = self.data_set + self.lf_csv.filename = f"{self.graph_image_name}.csv" + self.lf_csv.generate_csv() return "%s.png" % (self.graph_image_name) @@ -190,10 +249,11 @@ class lf_horizontal_stacked_graph(): _unit="%", _xaxis_name="Stations", _label=['Success', 'Fail'], - _graph_image_name="image_name", + _graph_image_name="image_name3", _color=["success", "Fail"], _figsize=(9, 4), - _disable_xaxis=False): + _disable_xaxis=False, + _enable_csv=True): self.unit = _unit self.seg = _seg self.xaxis_set1 = _xaxis_set1 @@ -205,6 +265,8 @@ class lf_horizontal_stacked_graph(): self.label = _label self.color = _color self.disable_xaxis = _disable_xaxis + self.enable_csv = _enable_csv + self.lf_csv = LfCSV() def build_horizontal_stacked_graph(self): def sumzip(items): @@ -246,7 +308,11 @@ class lf_horizontal_stacked_graph(): plt.savefig("%s.png" % self.graph_image_name, dpi=96) plt.close() print("{}.png".format(self.graph_image_name)) - + if self.enable_csv: + self.lf_csv.columns = self.label + self.lf_csv.rows = self.data_set + self.lf_csv.filename = f"{self.graph_image_name}.csv" + self.lf_csv.generate_csv() return "%s.png" % self.graph_image_name @@ -261,7 +327,7 @@ if __name__ == "__main__":

""" - # + # test_file = open(output_html_1, "w") test_file.write(graph_html_obj) test_file.close() @@ -293,7 +359,7 @@ if __name__ == "__main__":

""" - # + # test_file = open(output_html_2, "w") test_file.write(graph_html_obj) test_file.close() diff --git a/py-scripts/lf_influx_db.json b/py-scripts/lf_influx_db.json new file mode 100644 index 00000000..3aee6cfa --- /dev/null +++ b/py-scripts/lf_influx_db.json @@ -0,0 +1,12 @@ +{ + "influx_host":"192.168.100.201", + "influx_port": "8086", + "influx_org": "Candela", + "influx_token": "-u_Wd-L8o992701QF0c5UmqEp7w7Z7YOMaWLxOMgmHfATJGnQbbmYyNxHBR9PgD6taM_tcxqJl6U8DjU1xINFQ==", + "influx_bucket": "ben", + "influx_tag": "testbed Ferndale-01" +} + + + + \ No newline at end of file diff --git a/py-scripts/lf_report.py b/py-scripts/lf_report.py index a9d76de1..6f2aea90 100755 --- a/py-scripts/lf_report.py +++ b/py-scripts/lf_report.py @@ -28,107 +28,145 @@ INCLUDE_IN_README import os import shutil import datetime + import pandas as pd import pdfkit + # internal candela references included during intial phases, to be deleted at future date # https://candelatech.atlassian.net/wiki/spaces/LANFORGE/pages/372703360/Scripting+Data+Collection+March+2021 # base report class class lf_report(): def __init__(self, - #_path the report directory under which the report directories will be created. - _path = "/home/lanforge/html-reports", - _alt_path = "", - _date = "", - _title="LANForge Test Run Heading", - _table_title="LANForge Table Heading", - _graph_title="LANForge Graph Title", - _obj = "", - _obj_title = "", - _output_html="outfile.html", - _output_pdf="outfile.pdf", - _results_dir_name = "LANforge_Test_Results", - _output_format = 'html', # pass in on the write functionality, current not used - _dataframe="", - _path_date_time=""): # this is where the final report is placed. - #other report paths, + # _path the report directory under which the report directories will be created. 
+ _path="/home/lanforge/html-reports", + _alt_path="", + _date="", + _title="LANForge Test Run Heading", + _table_title="LANForge Table Heading", + _graph_title="LANForge Graph Title", + _obj="", + _obj_title="", + _output_html="outfile.html", + _output_pdf="outfile.pdf", + _results_dir_name="LANforge_Test_Results", + _output_format='html', # pass in on the write functionality, current not used + _dataframe="", + _path_date_time="", + _custom_css='custom-example.css'): # this is where the final report is placed. + # other report paths, - # _path is where the directory with the data time will be created - if _path == "local" or _path == "here": - self.path = os.path.abspath(__file__) - print("path set to file path: {}".format(self.path)) - elif _alt_path != "": - self.path = _alt_path - print("path set to alt path: {}".format(self.path)) - else: - self.path = _path - print("path set: {}".format(self.path)) - - self.dataframe=_dataframe - self.text = "" - self.title=_title - self.table_title=_table_title - self.graph_title=_graph_title - self.date=_date - self.output_html=_output_html - self.path_date_time = _path_date_time - self.write_output_html = "" - self.output_pdf=_output_pdf - self.write_output_pdf = "" - self.banner_html = "" - self.graph_titles="" - self.graph_image="" - self.html = "" - self.custom_html = "" - self.objective = _obj - self.obj_title = _obj_title - #self.systeminfopath = "" - self.date_time_directory = "" - self.banner_directory = "artifacts" - self.banner_file_name = "banner.png" # does this need to be configurable - self.logo_directory = "artifacts" - self.logo_file_name = "CandelaLogo2-90dpi-200x90-trans.png" # does this need to be configurable. 
- self.current_path = os.path.dirname(os.path.abspath(__file__)) + # _path is where the directory with the data time will be created + if _path == "local" or _path == "here": + self.path = os.path.abspath(__file__) + print("path set to file path: {}".format(self.path)) + elif _alt_path != "": + self.path = _alt_path + print("path set to alt path: {}".format(self.path)) + else: + self.path = _path + print("path set: {}".format(self.path)) - # pass in _date to allow to change after construction - self.set_date_time_directory(_date,_results_dir_name) - self.build_date_time_directory() + self.dataframe = _dataframe + self.text = "" + self.title = _title + self.table_title = _table_title + self.graph_title = _graph_title + self.date = _date + self.output_html = _output_html + self.path_date_time = _path_date_time + self.write_output_html = "" + self.output_pdf = _output_pdf + self.write_output_pdf = "" + self.banner_html = "" + self.footer_html = "" + self.graph_titles = "" + self.graph_image = "" + self.csv_file_name = "" + self.html = "" + self.custom_html = "" + self.objective = _obj + self.obj_title = _obj_title + # self.systeminfopath = "" + self.date_time_directory = "" + self.banner_directory = "artifacts" + self.banner_file_name = "banner.png" # does this need to be configurable + self.logo_directory = "artifacts" + self.logo_file_name = "CandelaLogo2-90dpi-200x90-trans.png" # does this need to be configurable. + self.logo_footer_file_name = "candela_swirl_small-72h.png" # does this need to be configurable. 
+ self.current_path = os.path.dirname(os.path.abspath(__file__)) + self.custom_css = _custom_css + # pass in _date to allow to change after construction + self.set_date_time_directory(_date, _results_dir_name) + self.build_date_time_directory() + + self.font_file = "CenturyGothic.woff" + # move the banners and candela images to report path + self.copy_banner() + self.copy_css() + self.copy_logo() + self.copy_logo_footer() - # move the banners and candela images to report path - self.copy_banner() - self.copy_logo() - def copy_banner(self): - banner_src_file = str(self.current_path)+'/'+str(self.banner_directory)+'/'+str(self.banner_file_name) - banner_dst_file = str(self.path_date_time)+'/'+ str(self.banner_file_name) - #print("banner src_file: {}".format(banner_src_file)) - #print("dst_file: {}".format(banner_dst_file)) - shutil.copy(banner_src_file,banner_dst_file) + banner_src_file = str(self.current_path) + '/' + str(self.banner_directory) + '/' + str(self.banner_file_name) + banner_dst_file = str(self.path_date_time) + '/' + str(self.banner_file_name) + # print("banner src_file: {}".format(banner_src_file)) + # print("dst_file: {}".format(banner_dst_file)) + shutil.copy(banner_src_file, banner_dst_file) + + def copy_css(self): + reportcss_src_file = str(self.current_path) + '/' + str(self.banner_directory) + '/report.css' + reportcss_dest_file = str(self.path_date_time) + '/report.css' + + customcss_src_file = str(self.current_path) + '/' + str(self.banner_directory) + '/' + str(self.custom_css) + customcss_dest_file = str(self.path_date_time) + '/custom.css' + + font_src_file = str(self.current_path) + '/' + str(self.banner_directory) + '/' + str(self.font_file) + font_dest_file = str(self.path_date_time) + '/' + str(self.font_file) + + shutil.copy(reportcss_src_file, reportcss_dest_file) + shutil.copy(customcss_src_file, customcss_dest_file) + shutil.copy(font_src_file, font_dest_file) def copy_logo(self): - logo_src_file = 
str(self.current_path)+'/'+str(self.logo_directory)+'/'+str(self.logo_file_name) - logo_dst_file = str(self.path_date_time)+'/'+ str(self.logo_file_name) - #print("logo_src_file: {}".format(logo_src_file)) - #print("logo_dst_file: {}".format(logo_dst_file)) - shutil.copy(logo_src_file,logo_dst_file) + logo_src_file = str(self.current_path) + '/' + str(self.logo_directory) + '/' + str(self.logo_file_name) + logo_dst_file = str(self.path_date_time) + '/' + str(self.logo_file_name) + # print("logo_src_file: {}".format(logo_src_file)) + # print("logo_dst_file: {}".format(logo_dst_file)) + shutil.copy(logo_src_file, logo_dst_file) - def move_graph_image(self,): + def copy_logo_footer(self): + logo_footer_src_file = str(self.current_path) + '/' + str(self.logo_directory) + '/' + str( + self.logo_footer_file_name) + logo_footer_dst_file = str(self.path_date_time) + '/' + str(self.logo_footer_file_name) + # print("logo_footer_src_file: {}".format(logo_footer_src_file)) + # print("logo_footer_dst_file: {}".format(logo_footer_dst_file)) + shutil.copy(logo_footer_src_file, logo_footer_dst_file) + + def move_graph_image(self, ): graph_src_file = str(self.graph_image) - graph_dst_file = str(self.path_date_time)+'/'+ str(self.graph_image) + graph_dst_file = str(self.path_date_time) + '/' + str(self.graph_image) print("graph_src_file: {}".format(graph_src_file)) print("graph_dst_file: {}".format(graph_dst_file)) - shutil.move(graph_src_file,graph_dst_file) + shutil.move(graph_src_file, graph_dst_file) - def set_path(self,_path): + def move_csv_file(self): + csv_src_file = str(self.csv_file_name) + csv_dst_file = str(self.path_date_time) + '/' + str(self.csv_file_name) + print("csv_src_file: {}".format(csv_src_file)) + print("csv_dst_file: {}".format(csv_dst_file)) + shutil.move(csv_src_file, csv_dst_file) + + def set_path(self, _path): self.path = _path - def set_date_time_directory(self,_date,_results_dir_name): + def set_date_time_directory(self, _date, _results_dir_name): 
self.date = _date self.results_dir_name = _results_dir_name if self.date != "": self.date_time_directory = str(self.date) + str("_") + str(self.results_dir_name) else: - self.date = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")).replace(':','-') + self.date = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")).replace(':', '-') self.date_time_directory = self.date + str("_") + str(self.results_dir_name) def build_date_time_directory(self): @@ -136,49 +174,59 @@ class lf_report(): self.set_date_time_directory() self.path_date_time = os.path.join(self.path, self.date_time_directory) print("path_date_time {}".format(self.path_date_time)) - try: + try: if not os.path.exists(self.path_date_time): os.mkdir(self.path_date_time) except: self.path_date_time = os.path.join(self.current_path, self.date_time_directory) if not os.path.exists(self.path_date_time): os.mkdir(self.path_date_time) - print("report path : {}".format(self.path_date_time)) + print("report path : {}".format(self.path_date_time)) - def set_text(self,_text): + def set_text(self, _text): self.text = _text - def set_title(self,_title): + def set_title(self, _title): self.title = _title - def set_table_title(self,_table_title): + def set_table_title(self, _table_title): self.table_title = _table_title - def set_graph_title(self,_graph_title): + def set_graph_title(self, _graph_title): self.graph_title = _graph_title - def set_date(self,_date): + # sets the csv file name as graph title + def set_csv_filename(self, _graph_title): + fname, ext = os.path.splitext(_graph_title) + self.csv_file_name = fname + ".csv" + + # The _date is set when class is enstanciated / created so this set_date should be used with caution, used to synchronize results + def set_date(self, _date): self.date = _date - def set_table_dataframe(self,_dataframe): + def set_table_dataframe(self, _dataframe): self.dataframe = _dataframe - def set_table_dataframe_from_csv(self,_csv): + def set_table_dataframe_from_csv(self, 
_csv): self.dataframe = pd.read_csv(_csv) - def set_custom_html(self,_custom_html): + def set_custom_html(self, _custom_html): self.custom_html = _custom_html - def set_obj_html(self,_obj_title, _obj ): + def set_obj_html(self, _obj_title, _obj): self.objective = _obj self.obj_title = _obj_title - def set_graph_image(self,_graph_image): + def set_graph_image(self, _graph_image): self.graph_image = _graph_image + def get_date(self): + return self.date + def get_path(self): return self.path - # get_path_date_time, get_report_path and need to be the same () + + # get_path_date_time, get_report_path and need to be the same def get_path_date_time(self): return self.path_date_time @@ -186,12 +234,12 @@ class lf_report(): return self.path_date_time def file_add_path(self, file): - output_file = str(self.path_date_time)+'/'+ str(file) + output_file = str(self.path_date_time) + '/' + str(file) print("output file {}".format(output_file)) return output_file - def write_html(self): - self.write_output_html = str(self.path_date_time)+'/'+ str(self.output_html) + def write_html(self): + self.write_output_html = str(self.path_date_time) + '/' + str(self.output_html) print("write_output_html: {}".format(self.write_output_html)) try: test_file = open(self.write_output_html, "w") @@ -201,8 +249,8 @@ class lf_report(): print("write_html failed") return self.write_output_html - def write_html_with_timestamp(self): - self.write_output_html = "{}/{}-{}".format(self.path_date_time,self.date,self.output_html) + def write_html_with_timestamp(self): + self.write_output_html = "{}/{}-{}".format(self.path_date_time, self.date, self.output_html) print("write_output_html: {}".format(self.write_output_html)) try: test_file = open(self.write_output_html, "w") @@ -212,161 +260,232 @@ class lf_report(): print("write_html failed") return self.write_output_html + # https://wkhtmltopdf.org/usage/wkhtmltopdf.txt + # page_size A4, A3, Letter, Legal + # orientation Portrait , Landscape + def 
write_pdf(self, _page_size='A4', _orientation='Portrait'): + # write logic to generate pdf here + # wget https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb + # sudo apt install ./wkhtmltox_0.12.6-1.focal_amd64.deb + + options = {"enable-local-file-access": None, + 'orientation': _orientation, + 'page-size': _page_size} # prevent error Blocked access to file + self.write_output_pdf = str(self.path_date_time) + '/' + str(self.output_pdf) + pdfkit.from_file(self.write_output_html, self.write_output_pdf, options=options) # https://wkhtmltopdf.org/usage/wkhtmltopdf.txt # page_size A4, A3, Letter, Legal # orientation Portrait , Landscape - def write_pdf(self, _page_size = 'A4', _orientation = 'Portrait'): - # write logic to generate pdf here - # wget https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb - # sudo apt install ./wkhtmltox_0.12.6-1.focal_amd64.deb - - options = {"enable-local-file-access" : None, - 'orientation': _orientation, - 'page-size': _page_size} # prevent error Blocked access to file - self.write_output_pdf = str(self.path_date_time)+'/'+ str(self.output_pdf) - pdfkit.from_file(self.write_output_html, self.write_output_pdf, options=options) - - # https://wkhtmltopdf.org/usage/wkhtmltopdf.txt - # page_size A4, A3, Letter, Legal - # orientation Portrait , Landscape - def write_pdf_with_timestamp(self, _page_size = 'A4', _orientation = 'Portrait'): - # write logic to generate pdf here - # wget https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb - # sudo apt install ./wkhtmltox_0.12.6-1.focal_amd64.deb - - options = {"enable-local-file-access" : None, - 'orientation': _orientation, - 'page-size': _page_size} # prevent error Blocked access to file - self.write_output_pdf = "{}/{}-{}".format(self.path_date_time,self.date,self.output_pdf) - pdfkit.from_file(self.write_output_html, self.write_output_pdf, 
options=options) + def write_pdf_with_timestamp(self, _page_size='A4', _orientation='Portrait'): + # write logic to generate pdf here + # wget https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb + # sudo apt install ./wkhtmltox_0.12.6-1.focal_amd64.deb + options = {"enable-local-file-access": None, + 'orientation': _orientation, + 'page-size': _page_size} # prevent error Blocked access to file + self.write_output_pdf = "{}/{}-{}".format(self.path_date_time, self.date, self.output_pdf) + pdfkit.from_file(self.write_output_html, self.write_output_pdf, options=options) def generate_report(self): - self.write_html() + self.write_html() self.write_pdf() def build_all(self): self.build_banner() + self.start_content_div() self.build_table_title() self.build_table() + self.end_content_div() def build_banner(self): - self.banner_html = """ - - - - - -
- - - BANNER - -
-
- - -
-
-

""" + str(self.title) + """

-

""" + str(self.date) + """

-
-
-
-
-
-
- """ + # NOTE: {{ }} are the ESCAPED curly braces + self.banner_html = """ + + + + + + + + {title} + + +
+ +
+ """.format( + title=self.title, + date=self.date, + ) self.html += self.banner_html def build_table_title(self): self.table_title_html = """ - - - - -
-

""" + str(self.table_title) + """

- """ + +

{title}

+ """.format(title=self.table_title) self.html += self.table_title_html + def start_content_div(self): + self.html += "\n
\n" + def build_text(self): + # please do not use 'style=' tags unless you cannot override a class self.text_html = """ - - - - -
-

""" + str(self.text) + """

- """ +
+

{text}

\n +
""".format(text=self.text) self.html += self.text_html - def build_date_time(self): - self.date_time = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-h-%m-m-%S-s")).replace(':','-') + self.date_time = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-h-%m-m-%S-s")).replace(':', '-') return self.date_time def build_path_date_time(self): - try: - self.path_date_time = os.path.join(self.path,self.date_time) + try: + self.path_date_time = os.path.join(self.path, self.date_time) os.mkdir(self.path_date_time) except: - curr_dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - self.path_date_time = os.path.join(curr_dir_path,self.date_time) - os.mkdir(self.path_date_time) + curr_dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + self.path_date_time = os.path.join(curr_dir_path, self.date_time) + os.mkdir(self.path_date_time) def build_table(self): - self.dataframe_html = self.dataframe.to_html(index=False, justify='center') # have the index be able to be passed in. + self.dataframe_html = self.dataframe.to_html(index=False, + justify='center') # have the index be able to be passed in. self.html += self.dataframe_html + def test_setup_table(self, test_setup_data, value): + if test_setup_data is None: + return None + else: + var = "" + for i in test_setup_data: + var = var + "" + i + "" + str(test_setup_data[i]) + "" + + setup_information = """ + + + + + + + +
""" + str(value) + """ + + """ + var + """ +
+
+ +
+ """ + self.html += setup_information + + def build_footer(self): + self.footer_html = """ + +
+ + + + + +
+ + + + + """ + self.html += self.footer_html + + def build_footer_no_png(self): + self.footer_html = """ + +
+ + + + + +
+ +
+

Generate by Candela Technologies LANforge network testing tool

+

www.candelatech.com

+

+ + + """ + self.html += self.footer_html + def build_custom(self): self.html += self.custom_html def build_objective(self): self.obj_html = """ - -

""" + str(self.obj_title) + """

-

""" + str(self.objective) + """

- """ + +

{title}

+

{objective}

+ """.format(title=self.obj_title, + objective=self.objective) self.html += self.obj_html def build_graph_title(self): self.table_graph_html = """ - - - - -
-

""" + str(self.graph_title) + """

- """ +
+

{title}

+ """.format(title=self.graph_title) self.html += self.table_graph_html def build_graph(self): self.graph_html_obj = """ - -

- """ - self.html +=self.graph_html_obj + + """.format(image=self.graph_image) + self.html += self.graph_html_obj + + def end_content_div(self): + self.html += "\n
\n" + # Unit Test if __name__ == "__main__": - - # Testing: generate data frame + # Testing: generate data frame dataframe = pd.DataFrame({ - 'product':['CT521a-264-1ac-1n','CT521a-1ac-1ax','CT522-264-1ac2-1n','CT523c-2ac2-db-10g-cu','CT523c-3ac2-db-10g-cu','CT523c-8ax-ac10g-cu','CT523c-192-2ac2-1ac-10g'], - 'radios':[1,1,2,2,6,9,3], - 'MIMO':['N','N','N','Y','Y','Y','Y'], - 'stations':[200,64,200,128,384,72,192], - 'mbps':[300,300,300,10000,10000,10000,10000] + 'product': ['CT521a-264-1ac-1n', 'CT521a-1ac-1ax', 'CT522-264-1ac2-1n', 'CT523c-2ac2-db-10g-cu', + 'CT523c-3ac2-db-10g-cu', 'CT523c-8ax-ac10g-cu', 'CT523c-192-2ac2-1ac-10g'], + 'radios': [1, 1, 2, 2, 6, 9, 3], + 'MIMO': ['N', 'N', 'N', 'Y', 'Y', 'Y', 'Y'], + 'stations': [200, 64, 200, 128, 384, 72, 192], + 'mbps': [300, 300, 300, 10000, 10000, 10000, 10000] }) print(dataframe) # Testing: generate data frame dataframe2 = pd.DataFrame({ - 'station':[1,2,3,4,5,6,7], - 'time_seconds':[23,78,22,19,45,22,25] + 'station': [1, 2, 3, 4, 5, 6, 7], + 'time_seconds': [23, 78, 22, 19, 45, 22, 25] }) report = lf_report() @@ -385,11 +504,14 @@ if __name__ == "__main__": report.set_table_dataframe(dataframe2) report.build_table() - #report.build_all() + # report.build_all() + # report.build_footer() + report.build_footer_no_png() - html_file = report.write_html() + html_file = report.write_html() print("returned file ") print(html_file) report.write_pdf() print("report path {}".format(report.get_path())) + diff --git a/py-scripts/lf_report_test.py b/py-scripts/lf_report_test.py index a9340f34..4399da37 100755 --- a/py-scripts/lf_report_test.py +++ b/py-scripts/lf_report_test.py @@ -36,7 +36,7 @@ if __name__ == "__main__": 'radios': [1, 1, 2, 2, 6, 9, 3], 'MIMO': ['N', 'N', 'N', 'Y', 'Y', 'Y', 'Y'], 'stations': [200, 64, 200, 128, 384, 72, 192], - 'mbps': [300, 300, 300, 10000, 10000, 10000, 10000] + '1 mbps': [300, 300, 300, 10000, 10000, 10000, 10000] }) print(dataframe) @@ -96,7 +96,7 @@ if __name__ == "__main__": 
_xaxis_categories=x_axis_values, _graph_image_name="Bi-single_radio_2.4GHz", _label=["bi-downlink", "bi-uplink", 'uplink'], - _color=None, + _color=['darkorange', 'forestgreen','blueviolet'], _color_edge='red') graph_png = graph.build_bar_graph() diff --git a/py-scripts/lf_rvr_test.py b/py-scripts/lf_rvr_test.py index 41e7543c..ad7f13c3 100755 --- a/py-scripts/lf_rvr_test.py +++ b/py-scripts/lf_rvr_test.py @@ -10,7 +10,7 @@ Note: To Run this script gui should be opened with This script is used to automate running Rate-vs-Range tests. You may need to view a Rate-vs-Range test configured through the GUI to understand the options and how best to input data. - + ./lf_rvr_test.py --mgr localhost --port 8080 --lf_user lanforge --lf_password lanforge \ --instance_name rvr-instance --config_name test_con --upstream 1.1.eth1 \ --dut RootAP --duration 15s --station 1.1.wlan0 \ @@ -30,7 +30,7 @@ the options and how best to input data. Note: attenuator_mod: selects the attenuator modules, bit-field. This example uses 3, which is first two attenuator modules on Attenuator ID 1040. - + --raw_line 'line contents' will add any setting to the test config. This is useful way to support any options not specifically enabled by the command options. 
@@ -45,7 +45,7 @@ show_log: 0 port_sorting: 0 kpi_id: Rate vs Range bg: 0xE0ECF8 -test_rig: +test_rig: show_scan: 1 auto_helper: 0 skip_2: 0 @@ -83,7 +83,7 @@ attenuations: 0..+50..950 attenuations2: 0..+50..950 chamber: 0 tt_deg: 0..+45..359 -cust_pkt_sz: +cust_pkt_sz: show_bar_labels: 1 show_prcnt_tput: 0 show_3s: 0 @@ -93,7 +93,7 @@ show_1m: 1 pause_iter: 0 outer_loop_atten: 0 show_realtime: 1 -operator: +operator: mconn: 1 mpkt: 1000 tos: 0 @@ -118,10 +118,14 @@ if 'py-json' not in sys.path: from cv_test_manager import cv_test as cvtest from cv_test_manager import * + class RvrTest(cvtest): def __init__(self, lf_host="localhost", lf_port=8080, + ssh_port=22, + local_path="", + graph_groups=None, lf_user="lanforge", lf_password="lanforge", instance_name="rvr_instance", @@ -145,7 +149,7 @@ class RvrTest(cvtest): self.lf_host = lf_host self.lf_port = lf_port self.lf_user = lf_user - self.lf_password =lf_password + self.lf_password = lf_password self.instance_name = instance_name self.config_name = config_name self.dut = dut @@ -162,12 +166,14 @@ class RvrTest(cvtest): self.raw_lines = raw_lines self.raw_lines_file = raw_lines_file self.sets = sets + self.ssh_port = ssh_port + self.local_path = local_path + self.graph_groups = graph_groups def setup(self): # Nothing to do at this time. return - def run(self): self.sync_cv() time.sleep(2) @@ -206,18 +212,18 @@ class RvrTest(cvtest): self.create_and_run_test(self.load_old_cfg, self.test_name, self.instance_name, self.config_name, self.sets, self.pull_report, self.lf_host, self.lf_user, self.lf_password, - cv_cmds) + cv_cmds, ssh_port=self.ssh_port, local_lf_report_dir=self.local_path, + graph_groups_file=self.graph_groups) self.rm_text_blob(self.config_name, blob_test) # To delete old config with same name def main(): - parser = argparse.ArgumentParser(""" Open this file in an editor and read the top notes for more details. Example: - + """ ) @@ -236,35 +242,40 @@ def main(): help="Specify requested upload speed. 
Percentage of theoretical is also supported. Default: 0") parser.add_argument("--duration", default="", help="Specify duration of each traffic run") + parser.add_argument("--graph_groups", help="File to save graph_groups to", default=None) + parser.add_argument("--report_dir", default="") args = parser.parse_args() cv_base_adjust_parser(args) - CV_Test = RvrTest(lf_host = args.mgr, - lf_port = args.port, - lf_user = args.lf_user, - lf_password = args.lf_password, - instance_name = args.instance_name, - config_name = args.config_name, - upstream = args.upstream, - pull_report = args.pull_report, - load_old_cfg = args.load_old_cfg, - download_speed = args.download_speed, - upload_speed = args.upload_speed, - duration = args.duration, - dut = args.dut, - station = args.station, - enables = args.enable, - disables = args.disable, - raw_lines = args.raw_line, - raw_lines_file = args.raw_lines_file, - sets = args.set + CV_Test = RvrTest(lf_host=args.mgr, + lf_port=args.port, + lf_user=args.lf_user, + lf_password=args.lf_password, + instance_name=args.instance_name, + config_name=args.config_name, + upstream=args.upstream, + pull_report=args.pull_report, + load_old_cfg=args.load_old_cfg, + download_speed=args.download_speed, + upload_speed=args.upload_speed, + duration=args.duration, + dut=args.dut, + station=args.station, + enables=args.enable, + disables=args.disable, + raw_lines=args.raw_line, + raw_lines_file=args.raw_lines_file, + sets=args.set, + graph_groups=args.graph_groups ) CV_Test.setup() CV_Test.run() CV_Test.check_influx_kpi(args) + if __name__ == "__main__": main() + diff --git a/py-scripts/lf_rx_sensitivity_config.json b/py-scripts/lf_rx_sensitivity_config.json new file mode 100644 index 00000000..81c370bc --- /dev/null +++ b/py-scripts/lf_rx_sensitivity_config.json @@ -0,0 +1,19 @@ +{ + "mgr":"192.168.0.101", + "port":"8080", + "lf_user":"lanforge", + "lf_password":"lanforge", + "instance_name":"rx-sensitivity-instance", + "config_name":"test_con", + 
"upstream":"1.1.eth1", + "dut":"asus_5g", + "duration":"15s", + "station":"1.1.eth2", + "download_speed":"85%", + "upload_speed":"0", + "pull_report": true, + "raw_line": ["txo_preamble: VHT", "txo_mcs: 4 OFDM, HT, VHT;5 OFDM, HT, VHT;6 OFDM, HT, VHT;7 OFDM, HT, VHT", "spatial_streams: 3", "bandw_options: 80", "txo_sgi: ON", "txo_retries: No Retry", "txo_txpower: 17"] +} + + + diff --git a/py-scripts/lf_rx_sensitivity_test.py b/py-scripts/lf_rx_sensitivity_test.py new file mode 100644 index 00000000..7ca8e212 --- /dev/null +++ b/py-scripts/lf_rx_sensitivity_test.py @@ -0,0 +1,387 @@ +#!/usr/bin/env python3 + +""" +Note: To Run this script gui should be opened with + + path: cd LANforgeGUI_5.4.3 (5.4.3 can be changed with GUI version) + pwd (Output : /home/lanforge/LANforgeGUI_5.4.3) + ./lfclient.bash -cli-socket 3990 + +This script is used to automate running RX Sensitivity tests. You +may need to view a RX Sensitivity test configured through the GUI to understand +the options and how best to input data. + + ./lf_rx_sensitivity_test.py --mgr localhost --port 8080 --lf_user lanforge --lf_password lanforge \ + --instance_name rx-sensitivity-instance --config_name test_con --upstream 1.1.eth2 \ + --dut linksys-8450 --duration 15s --station 1.1.sta01500 \ + --download_speed 85% --upload_speed 0 \ + --raw_line 'txo_preamble: VHT' \ + --raw_line 'txo_mcs: 4 OFDM, HT, VHT;5 OFDM, HT, VHT;6 OFDM, HT, VHT;7 OFDM, HT, VHT' \ + --raw_line 'spatial_streams: 3' \ + --raw_line 'bandw_options: 80' \ + --raw_line 'txo_sgi: ON' \ + --raw_line 'txo_retries: No Retry' \ + --raw_line 'txo_txpower: 17' \ + --test_rig Testbed-01 --pull_report \ + --influx_host c7-graphana --influx_port 8086 --influx_org Candela \ + --influx_token=-u_Wd-L8o992701QF0c5UmqEp7w7Z7YOMaWLxOMgmHfATJGnQbbmYyNxHBR9PgD6taM_tcxqJl6U8DjU1xINFQ== \ + --influx_bucket ben \ + --influx_tag testbed Ferndale-01 + +Note: + --raw_line 'line contents' will add any setting to the test config. 
This is + useful way to support any options not specifically enabled by the + command options. + --set modifications will be applied after the other config has happened, + so it can be used to override any other config. + +Example of raw text config for RX Sensitivity, to show other possible options: + +show_events: 1 +show_log: 0 +port_sorting: 2 +kpi_id: RX Sensitivity +bg: 0xE0ECF8 +test_rig: +show_scan: 1 +auto_helper: 0 +skip_ac: 0 +skip_ax: 0 +skip_2: 0 +skip_5: 0 +skip_5b: 1 +skip_dual: 0 +skip_tri: 1 +selected_dut: ea8300 +duration: 15000 +settle_time: 1000 +sndbuf: 0 +rcvbuf: 0 +traffic_port: 1.1.157 sta01500 +upstream_port: 1.1.1 eth1 +path_loss: 10 +speed: 85% +speed2: 0Kbps +min_rssi_bound: -150 +max_rssi_bound: 0 +channels: AUTO +modes: Auto +pkts: 1024 +spatial_streams: 1 +security_options: AUTO +bandw_options: 20 +traffic_types: TCP +directions: DUT Transmit +txo_preamble: OFDM +txo_mcs: 0 CCK, OFDM, HT, VHT;1 CCK, OFDM, HT, VHT;2 CCK, OFDM, HT, VHT;3 CCK, OFDM, HT, VHT +txo_retries: No Retry +txo_sgi: ON +txo_txpower: 15 +attenuator: 0 +attenuator2: 0 +attenuator_mod: 255 +attenuator_mod2: 255 +attenuations: 0..+50..100 +attenuations2: 0..+50..950 +chamber: 0 +tt_deg: 0..+45..359 +cust_pkt_sz: +show_bar_labels: 1 +show_prcnt_tput: 0 +show_3s: 0 +show_ll_graphs: 0 +show_gp_graphs: 1 +show_1m: 1 +pause_iter: 0 +outer_loop_atten: 0 +show_realtime: 1 +operator: +mconn: 1 +mpkt: 1000 +tos: 0 +loop_iterations: 1 + +""" + +import sys +import os +import argparse +import time +import json +from os import path + +if sys.version_info[0] != 3: + print("This script requires Python 3") + exit(1) + +if 'py-json' not in sys.path: + sys.path.append(os.path.join(os.path.abspath('..'), 'py-json')) + +from cv_test_manager import cv_test +from cv_test_manager import * + + +class RxSensitivityTest(cv_test): + def __init__(self, + lf_host="localhost", + lf_port=8080, + lf_user="lanforge", + lf_password="lanforge", + ssh_port=22, + local_path="", + 
instance_name="dpt_instance", + config_name="dpt_config", + upstream="1.1.eth2", + pull_report=False, + load_old_cfg=False, + upload_speed="0", + download_speed="85%", + duration="15s", + station="1.1.sta01500", + dut="NA", + enables=[], + disables=[], + raw_lines=[], + raw_lines_file="", + sets=[], + graph_groups=None, + report_dir="" + ): + super().__init__(lfclient_host=lf_host, lfclient_port=lf_port) + + self.lf_host = lf_host + self.lf_port = lf_port + self.lf_user = lf_user + self.lf_password = lf_password + self.instance_name = instance_name + self.config_name = config_name + self.dut = dut + self.duration = duration + self.upstream = upstream + self.station = station + self.pull_report = pull_report + self.load_old_cfg = load_old_cfg + self.test_name = "RX Sensitivity" + self.upload_speed = upload_speed + self.download_speed = download_speed + self.enables = enables + self.disables = disables + self.raw_lines = raw_lines + self.raw_lines_file = raw_lines_file + self.sets = sets + self.graph_groups = graph_groups + self.report_dir = report_dir + self.ssh_port = ssh_port + self.local_path = local_path + + def setup(self): + # Nothing to do at this time. + return + + def run(self): + self.sync_cv() + time.sleep(2) + self.sync_cv() + + blob_test = "rxsens-test-latest-" + + self.rm_text_blob(self.config_name, blob_test) # To delete old config with same name + self.show_text_blob(None, None, False) + + # Test related settings + cfg_options = [] + + ### HERE### + self.apply_cfg_options(cfg_options, self.enables, self.disables, self.raw_lines, self.raw_lines_file) + + # cmd line args take precedence and so come last in the cfg array. 
+ if self.upstream != "": + cfg_options.append("upstream_port: " + self.upstream) + if self.station != "": + cfg_options.append("traffic_port: " + self.station) + if self.download_speed != "": + cfg_options.append("speed: " + self.download_speed) + if self.upload_speed != "": + cfg_options.append("speed2: " + self.upload_speed) + if self.duration != "": + cfg_options.append("duration: " + self.duration) + if self.dut != "": + cfg_options.append("selected_dut: " + self.dut) + + # We deleted the scenario earlier, now re-build new one line at a time. + + self.build_cfg(self.config_name, blob_test, cfg_options) + + cv_cmds = [] + self.create_and_run_test(self.load_old_cfg, self.test_name, self.instance_name, + self.config_name, self.sets, + self.pull_report, self.lf_host, self.lf_user, self.lf_password, + cv_cmds, ssh_port=self.ssh_port, local_path=self.local_path, + graph_groups_file=self.graph_groups) + self.rm_text_blob(self.config_name, blob_test) # To delete old config with same name + + +def main(): + parser = argparse.ArgumentParser(description=""" + + IMPORTANT: Start lanforge with socket 3990 : ./lfclient.bash -cli-socket 3990 + lfclient.bash is located in the LANforgeGUI_X.X.X directory + + On local or remote system: ./lfclient.bash -cli-socket 3990 -s LF_MGR + On local system the -s LF_MGR will be local_host if not provided + + Open this file in an editor and read the top notes for more details. 
+ Example: + ./lf_rx_sensitivity_test.py --mgr localhost --port 8080 --lf_user lanforge --lf_password lanforge \ + --instance_name rx-sensitivity-instance --config_name test_con --upstream 1.1.eth2 \ + --dut linksys-8450 --duration 15s --station 1.1.sta01500 \ + --download_speed 85% --upload_speed 0 \ + --raw_line 'txo_preamble: VHT' \ + --raw_line 'txo_mcs: 4 OFDM, HT, VHT;5 OFDM, HT, VHT;6 OFDM, HT, VHT;7 OFDM, HT, VHT' \ + --raw_line 'spatial_streams: 3' \ + --raw_line 'bandw_options: 80' \ + --raw_line 'txo_sgi: ON' \ + --raw_line 'txo_retries: No Retry' \ + --raw_line 'txo_txpower: 17' \ + --test_rig Testbed-01 --pull_report \ + --influx_host c7-graphana --influx_port 8086 --influx_org Candela \ + --influx_token=-u_Wd-L8o992701QF0c5UmqEp7w7Z7YOMaWLxOMgmHfATJGnQbbmYyNxHBR9PgD6taM_tcxqJl6U8DjU1xINFQ== \ + --influx_bucket ben \ + --influx_tag testbed Ferndale-01 + + + Example 2: + ./lf_dataplane_test.py --json .json + + see sample json file: lf_dataplane_config.json + + Sample .json between using eth1 and eth2 + { + "mgr":"192.168.0.101", + "port":"8080", + "lf_user":"lanforge", + "lf_password":"lanforge", + "instance_name":"dataplane-instance", + "config_name":"test_con", + "upstream":"1.1.eth1", + "dut":"asus_5g", + "duration":"15s", + "station":"1.1.eth2", + "download_speed":"85%", + "upload_speed":"0", + "raw_line": ["txo_preamble: VHT", "txo_mcs: 4 OFDM, HT, VHT;5 OFDM, HT, VHT;6 OFDM, HT, VHT;7 OFDM, HT, VHT", "spatial_streams: 3", "bandw_options: 80", "txo_sgi: ON", "txo_retries: No Retry", "txo_txpower: 17"] + } + + Sample .json between using eth1 and station 1.1.sta0002 + { + "mgr":"192.168.0.101", + "port":"8080", + "lf_user":"lanforge", + "lf_password":"lanforge", + "instance_name":"dataplane-instance", + "config_name":"test_con", + "upstream":"1.1.eth1", + "dut":"asus_5g", + "duration":"15s", + "station":"1.1.sta0002", + "download_speed":"85%", + "upload_speed":"0", + "raw_line": ["txo_preamble: VHT", "txo_mcs: 4 OFDM, HT, VHT;5 OFDM, HT, VHT;6 OFDM, 
HT, VHT;7 OFDM, HT, VHT", "spatial_streams: 3", "bandw_options: 80", "txo_sgi: ON", "txo_retries: No Retry", "txo_txpower: 17"] + } + + """ + ) + + cv_add_base_parser(parser) # see cv_test_manager.py + + parser.add_argument('--json', help="--json json input file", default="") + parser.add_argument("-u", "--upstream", type=str, default="", + help="Upstream port for wifi capacity test ex. 1.1.eth2") + parser.add_argument("--station", type=str, default="", + help="Station to be used in this test, example: 1.1.sta01500") + + parser.add_argument("--dut", default="", + help="Specify DUT used by this test, example: linksys-8450") + parser.add_argument("--download_speed", default="", + help="Specify requested download speed. Percentage of theoretical is also supported. Default: 85%%.") + parser.add_argument("--upload_speed", default="", + help="Specify requested upload speed. Percentage of theoretical is also supported. Default: 0") + parser.add_argument("--duration", default="", + help="Specify duration of each traffic run") + parser.add_argument("--graph_groups", help="File to save graph_groups to", default=None) + parser.add_argument("--report_dir", default="") + + args = parser.parse_args() + + # use json config file + if args.json != "": + try: + with open(args.json, 'r') as json_config: + json_data = json.load(json_config) + except: + print("Error reading {}".format(args.json)) + # json configuation takes presidence to command line + # TODO see if there is easier way to search presence, look at parser args + if "mgr" in json_data: + args.mgr = json_data["mgr"] + if "port" in json_data: + args.port = json_data["port"] + if "lf_user" in json_data: + args.lf_user = json_data["lf_user"] + if "lf_password" in json_data: + args.lf_password = json_data["lf_password"] + if "instance_name" in json_data: + args.instance_name = json_data["instance_name"] + if "config_name" in json_data: + args.config_name = json_data["config_name"] + if "upstream" in json_data: + args.upstream 
= json_data["upstream"] + if "dut" in json_data: + args.dut = json_data["dut"] + if "duration" in json_data: + args.duration = json_data["duration"] + if "station" in json_data: + args.station = json_data["station"] + if "download_speed" in json_data: + args.download_speed = json_data["download_speed"] + if "upload_speed" in json_data: + args.upload_speed = json_data["upload_speed"] + if "pull_report" in json_data: + args.pull_report = json_data["pull_report"] + if "raw_line" in json_data: + # the json_data is a list , need to make into a list of lists, to match command line raw_line paramaters + # https://www.tutorialspoint.com/convert-list-into-list-of-lists-in-python + json_data_tmp = [[x] for x in json_data["raw_line"]] + args.raw_line = json_data_tmp + + cv_base_adjust_parser(args) + + CV_Test = RxSensitivityTest(lf_host=args.mgr, + lf_port=args.port, + lf_user=args.lf_user, + lf_password=args.lf_password, + instance_name=args.instance_name, + config_name=args.config_name, + upstream=args.upstream, + pull_report=args.pull_report, + load_old_cfg=args.load_old_cfg, + download_speed=args.download_speed, + upload_speed=args.upload_speed, + duration=args.duration, + dut=args.dut, + station=args.station, + enables=args.enable, + disables=args.disable, + raw_lines=args.raw_line, + raw_lines_file=args.raw_lines_file, + sets=args.set, + graph_groups=args.graph_groups + ) + CV_Test.setup() + CV_Test.run() + + CV_Test.check_influx_kpi(args) + + +if __name__ == "__main__": + main() diff --git a/py-scripts/lf_tr398_test.py b/py-scripts/lf_tr398_test.py index 9f5d9d9e..8cbec10a 100755 --- a/py-scripts/lf_tr398_test.py +++ b/py-scripts/lf_tr398_test.py @@ -28,8 +28,7 @@ the options and how best to input data. --set 'Multiple Assoc Stability' 0 \ --set 'Downlink MU-MIMO' 0 \ --set 'AP Coexistence' 0 \ - --set 'Long Term Stability' 0 \ - --test_rig Testbed-01 + --set 'Long Term Stability' 0 Note: --raw_line 'line contents' will add any setting to the test config. 
This is @@ -61,7 +60,6 @@ notes1: smaller CT810a chamber. CT704b and CT714 4-module attenuators are used. notes2: mounted on the sides of the DUT chamber are used to communicate to the DUT. DUT is facing forward at notes3: the zero-rotation angle. bg: 0xE0ECF8 -test_rig: TR-398 test bed show_scan: 1 auto_helper: 1 skip_2: 0 @@ -278,8 +276,7 @@ def main(): --set 'Multiple Assoc Stability' 0 \ --set 'Downlink MU-MIMO' 0 \ --set 'AP Coexistence' 0 \ - --set 'Long Term Stability' 0 \ - --test_rig Testbed-01 + --set 'Long Term Stability' 0 """ ) diff --git a/py-scripts/lf_wifi_capacity_test.py b/py-scripts/lf_wifi_capacity_test.py index 24364cc6..951713ee 100755 --- a/py-scripts/lf_wifi_capacity_test.py +++ b/py-scripts/lf_wifi_capacity_test.py @@ -25,7 +25,7 @@ Note: This is a test file which will run a wifi capacity test. --instance_name wct_instance --config_name wifi_config --upstream 1.1.eth1 --batch_size 1,5,25 --loop_iter 1 \ --protocol UDP-IPv4 --duration 6000 --pull_report --stations 1.1.sta0000,1.1.sta0001 \ --create_stations --radio wiphy0 --ssid test-ssid --security open --paswd [BLANK] \ - --test_rig Testbed-01 + --test_rig Testbed-01 --set DUT_NAME linksys-8450 Note: @@ -34,6 +34,8 @@ Note: --stations == Enter stations to use for wifi capacity + --set DUT_NAME XXXX == Determines which DUT the wifi capacity test should use to get details on + Example of raw text config for Capacity, to show other possible options: sel_port-0: 1.1.eth1 @@ -353,7 +355,10 @@ class WiFiCapacityTest(cv_test): influx_host="localhost", influx_port=8086, report_dir="", - graph_groups=None + graph_groups=None, + test_rig="", + local_lf_report_dir="", + debug=False, ): super().__init__(lfclient_host=lfclient_host, lfclient_port=lf_port) @@ -390,6 +395,9 @@ class WiFiCapacityTest(cv_test): self.influx_port = influx_port self.report_dir = report_dir self.graph_groups = graph_groups + self.test_rig = test_rig + self.local_lf_report_dir = local_lf_report_dir + self.debug = debug def 
setup(self): if self.create_stations and self.stations != "": @@ -445,6 +453,8 @@ class WiFiCapacityTest(cv_test): cfg_options.append("ul_rate: " + self.upload_rate) if self.download_rate != "": cfg_options.append("dl_rate: " + self.download_rate) + if self.test_rig != "": + cfg_options.append("test_rig: " + self.test_rig) cfg_options.append("save_csv: 1") @@ -465,7 +475,8 @@ class WiFiCapacityTest(cv_test): self.create_and_run_test(self.load_old_cfg, self.test_name, self.instance_name, self.config_name, self.sets, self.pull_report, self.lfclient_host, self.lf_user, self.lf_password, - cv_cmds, graph_groups_file=self.graph_groups) + cv_cmds, graph_groups_file=self.graph_groups, local_lf_report_dir=self.local_lf_report_dir, + debug=self.debug) self.rm_text_blob(self.config_name, blob_test) # To delete old config with same name @@ -519,6 +530,9 @@ def main(): parser.add_argument("--report_dir", default="") parser.add_argument("--scenario", default="") parser.add_argument("--graph_groups", help="File to save graph groups to", default=None) + parser.add_argument("--local_lf_report_dir", help="--local_lf_report_dir default '' put where dataplane script run from",default="") + parser.add_argument("--debug", default=False) + args = parser.parse_args() cv_base_adjust_parser(args) @@ -550,7 +564,10 @@ def main(): raw_lines=args.raw_line, raw_lines_file=args.raw_lines_file, sets=args.set, - graph_groups=args.graph_groups + graph_groups=args.graph_groups, + test_rig=args.test_rig, + local_lf_report_dir=args.local_lf_report_dir, + debug=args.debug ) WFC_Test.setup() WFC_Test.run() diff --git a/py-scripts/test_generic.py b/py-scripts/test_generic.py index 247d9554..50118d15 100755 --- a/py-scripts/test_generic.py +++ b/py-scripts/test_generic.py @@ -98,6 +98,13 @@ class GenTest(LFCliBase): if (speedtest_max_ping is not None): self.generic_endps_profile.speedtest_max_ping = float(speedtest_max_ping) + def check_tab_exists(self): + response = self.json_get("generic") + if 
response is None: + return False + else: + return True + def start(self, print_pass=False, print_fail=False): self.station_profile.admin_up() temp_stas = [] @@ -290,6 +297,8 @@ python3 ./test_generic.py client=args.client, _debug_on=args.debug) + if not generic_test.check_tab_exists(): + raise ValueError("Error received from GUI, please ensure generic tab is enabled") generic_test.cleanup(station_list) generic_test.build() if not generic_test.passes(): diff --git a/py-scripts/test_ip_variable_time.py b/py-scripts/test_ip_variable_time.py new file mode 100755 index 00000000..6507dfb5 --- /dev/null +++ b/py-scripts/test_ip_variable_time.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python3 + +""" +NAME: test_ip_variable_time.py + +PURPOSE: +test_ip_variable_time.py will create stations and endpoints to generate and verify layer-3 traffic over ipv4 or ipv6. +This script replaces the functionality of test_ipv4_variable_time.py and test_ipv6_variable_time.py +This Script has two working modes: + Mode 1: + When station is not available, + + This script will create a variable number of stations each with their own set of cross-connects and endpoints. + It will then create layer 3 traffic over a specified amount of time, testing for increased traffic at regular intervals. + This test will pass if all stations increase traffic over the full test duration. + + Mode 2: + + When station is already available This script will create layer3 cross-connects and endpoints It will then + create layer 3 traffic over a specified amount of time, testing for increased traffic at regular intervals. + This test will pass if all stations increase traffic over the full test duration. + +Use './test_ip_variable_time.py --help' to see command line usage and options +Copyright 2021 Candela Technologies Inc +License: Free to distribute and modify. LANforge systems must be licensed. 
+""" + +import sys +import os + +if sys.version_info[0] != 3: + print("This script requires Python 3") + exit(1) + +if 'py-json' not in sys.path: + sys.path.append(os.path.join(os.path.abspath('..'), 'py-json')) + +import argparse +from LANforge import LFUtils +from realm import Realm +import time +import datetime + + +class IPVariableTime(Realm): + def __init__(self, + ssid=None, + security=None, + password=None, + sta_list=[], + create_sta=True, + name_prefix=None, + upstream=None, + radio=None, + host="localhost", + port=8080, + mode=0, + ap=None, + traffic_type=None, + side_a_min_rate=56, side_a_max_rate=0, + side_b_min_rate=56, side_b_max_rate=0, + number_template="00000", + test_duration="5m", + use_ht160=False, + ipv6=False, + _debug_on=False, + _exit_on_error=False, + _exit_on_fail=False): + super().__init__(lfclient_host=host, + lfclient_port=port), + self.upstream = upstream + self.host = host + self.port = port + self.ssid = ssid + self.sta_list = sta_list + self.create_sta = create_sta + self.security = security + self.password = password + self.radio = radio + self.mode = mode + self.ap = ap + self.traffic_type = traffic_type + self.number_template = number_template + self.debug = _debug_on + # self.json_post("/cli-json/set_resource", { + # "shelf":1, + # "resource":all, + # "max_staged_bringup": 30, + # "max_trying_ifup": 15, + # "max_station_bringup": 6 + # }) + self.name_prefix = name_prefix + self.test_duration = test_duration + self.station_profile = self.new_station_profile() + self.cx_profile = self.new_l3_cx_profile() + self.station_profile.lfclient_url = self.lfclient_url + self.station_profile.ssid = self.ssid + self.station_profile.ssid_pass = self.password + self.station_profile.security = self.security + self.station_profile.number_template_ = self.number_template + self.station_profile.debug = self.debug + + self.station_profile.use_ht160 = use_ht160 + if self.station_profile.use_ht160: + self.station_profile.mode = 9 + 
self.station_profile.mode = mode + if self.ap is not None: + self.station_profile.set_command_param("add_sta", "ap", self.ap) + + self.cx_profile.host = self.host + self.cx_profile.port = self.port + self.ipv6 = ipv6 + self.cx_profile.name_prefix = self.name_prefix + self.cx_profile.side_a_min_bps = side_a_min_rate + self.cx_profile.side_a_max_bps = side_a_max_rate + self.cx_profile.side_b_min_bps = side_b_min_rate + self.cx_profile.side_b_max_bps = side_b_max_rate + + def start(self, print_pass=False, print_fail=False): + if self.create_sta: + self.station_profile.admin_up() + # to-do- check here if upstream port got IP + temp_stas = self.station_profile.station_names.copy() + + if self.wait_for_ip(temp_stas, ipv4=not self.ipv6, ipv6=self.ipv6): + self._pass("All stations got IPs") + else: + self._fail("Stations failed to get IPs") + self.exit_fail() + self.cx_profile.start_cx() + + def stop(self): + self.cx_profile.stop_cx() + if self.create_sta: + self.station_profile.admin_down() + + def pre_cleanup(self): + self.cx_profile.cleanup_prefix() + if self.create_sta: + for sta in self.sta_list: + self.rm_port(sta, check_exists=True) + + def cleanup(self): + self.cx_profile.cleanup() + if self.create_sta: + self.station_profile.cleanup() + LFUtils.wait_until_ports_disappear(base_url=self.lfclient_url, port_list=self.station_profile.station_names, + debug=self.debug) + + def build(self): + if self.create_sta: + self.station_profile.use_security(self.security, self.ssid, self.password) + self.station_profile.set_number_template(self.number_template) + print("Creating stations") + self.station_profile.set_command_flag("add_sta", "create_admin_down", 1) + self.station_profile.set_command_param("set_port", "report_timer", 1500) + self.station_profile.set_command_flag("set_port", "rpt_timer", 1) + self.station_profile.create(radio=self.radio, sta_names_=self.sta_list, debug=self.debug) + self._pass("PASS: Station build finished") + + 
self.cx_profile.create(endp_type=self.traffic_type, side_a=self.sta_list, + side_b=self.upstream, + sleep_time=0) + + +def main(): + parser = Realm.create_basic_argparse( + prog='test_ip_variable_time.py', + formatter_class=argparse.RawTextHelpFormatter, + epilog='''\ + Create stations to test connection and traffic on VAPs of varying security types (WEP, WPA, WPA2, WPA3, Open) + over ipv4 or ipv6 + ''', + description='''\ +test_ip_variable_time.py: +-------------------- +Generic command layout: + +python3 ./test_ip_variable_time.py + --upstream_port eth1 + --radio wiphy0 + --num_stations 32 + --security {open|wep|wpa|wpa2|wpa3} + --mode 1 + {"auto" : "0", + "a" : "1", + "b" : "2", + "g" : "3", + "abg" : "4", + "abgn" : "5", + "bgn" : "6", + "bg" : "7", + "abgnAC" : "8", + "anAC" : "9", + "an" : "10", + "bgnAC" : "11", + "abgnAX" : "12", + "bgnAX" : "13"} + --ssid netgear + --password admin123 + --test_duration 2m (default) + --monitor_interval_ms + --a_min 3000 + --b_min 1000 + --ap "00:0e:8e:78:e1:76" + --output_format csv + --traffic_type lf_udp + --report_file ~/Documents/results.csv (Example of csv file output - please use another extension for other file formats) + --compared_report ~/Documents/results_prev.csv (Example of csv file retrieval - please use another extension for other file formats) - UNDER CONSTRUCTION + --layer3_cols 'name','tx bytes','rx bytes','dropped' (column names from the GUI to print on report - please read below to know what to put here according to preferences) + --port_mgr_cols 'ap','ip' (column names from the GUI to print on report - please read below to know what to put here according to preferences) + --debug + + python3 ./test_ip_variable_time.py + --upstream_port eth1 (upstream Port) + --traffic_type lf_udp (traffic type, lf_udp | lf_tcp) + --test_duration 5m (duration to run traffic 5m --> 5 Minutes) + --create_sta False (False, means it will not create stations and use the sta_names specified below) + --sta_names 
sta000,sta001,sta002 (used if --create_sta False, comma separated names of stations) + + +=============================================================================== + ** FURTHER INFORMATION ** + Using the layer3_cols flag: + + Currently the output function does not support inputting the columns in layer3_cols the way they are displayed in the GUI. This quirk is under construction. To output + certain columns in the GUI in your final report, please match the according GUI column display to it's counterpart to have the columns correctly displayed in + your report. + + GUI Column Display Layer3_cols argument to type in (to print in report) + + Name | 'name' + EID | 'eid' + Run | 'run' + Mng | 'mng' + Script | 'script' + Tx Rate | 'tx rate' + Tx Rate (1 min) | 'tx rate (1 min)' + Tx Rate (last) | 'tx rate (last)' + Tx Rate LL | 'tx rate ll' + Rx Rate | 'rx rate' + Rx Rate (1 min) | 'rx rate (1 min)' + Rx Rate (last) | 'rx rate (last)' + Rx Rate LL | 'rx rate ll' + Rx Drop % | 'rx drop %' + Tx PDUs | 'tx pdus' + Tx Pkts LL | 'tx pkts ll' + PDU/s TX | 'pdu/s tx' + Pps TX LL | 'pps tx ll' + Rx PDUs | 'rx pdus' + Rx Pkts LL | 'pps rx ll' + PDU/s RX | 'pdu/s tx' + Pps RX LL | 'pps rx ll' + Delay | 'delay' + Dropped | 'dropped' + Jitter | 'jitter' + Tx Bytes | 'tx bytes' + Rx Bytes | 'rx bytes' + Replays | 'replays' + TCP Rtx | 'tcp rtx' + Dup Pkts | 'dup pkts' + Rx Dup % | 'rx dup %' + OOO Pkts | 'ooo pkts' + Rx OOO % | 'rx ooo %' + RX Wrong Dev | 'rx wrong dev' + CRC Fail | 'crc fail' + RX BER | 'rx ber' + CX Active | 'cx active' + CX Estab/s | 'cx estab/s' + 1st RX | '1st rx' + CX TO | 'cx to' + Pattern | 'pattern' + Min PDU | 'min pdu' + Max PDU | 'max pdu' + Min Rate | 'min rate' + Max Rate | 'max rate' + Send Buf | 'send buf' + Rcv Buf | 'rcv buf' + CWND | 'cwnd' + TCP MSS | 'tcp mss' + Bursty | 'bursty' + A/B | 'a/b' + Elapsed | 'elapsed' + Destination Addr | 'destination addr' + Source Addr | 'source addr' + ''') + + parser.add_argument('--mode', help='Used to 
force mode of stations') + parser.add_argument('--ap', help='Used to force a connection to a particular AP') + parser.add_argument('--traffic_type', help='Select the Traffic Type [lf_udp, lf_tcp, udp, tcp], type will be ' + 'adjusted automatically between ipv4 and ipv6 based on use of --ipv6 flag' + , required=True) + parser.add_argument('--output_format', help='choose either csv or xlsx') + parser.add_argument('--report_file', help='where you want to store results', default=None) + parser.add_argument('--a_min', help='--a_min bps rate minimum for side_a', default=256000) + parser.add_argument('--b_min', help='--b_min bps rate minimum for side_b', default=256000) + parser.add_argument('--test_duration', help='--test_duration sets the duration of the test', default="2m") + parser.add_argument('--layer3_cols', help='Columns wished to be monitored from layer 3 endpoint tab', + default=['name', 'tx bytes', 'rx bytes', 'tx rate', 'rx rate']) + parser.add_argument('--port_mgr_cols', help='Columns wished to be monitored from port manager tab', + default=['ap', 'ip', 'parent dev']) + parser.add_argument('--compared_report', help='report path and file which is wished to be compared with new report', + default=None) + parser.add_argument('--monitor_interval', + help='how frequently do you want your monitor function to take measurements; \, 35s, 2h', + default='10s') + parser.add_argument('--ipv6', help='Sets the test to use IPv6 traffic instead of IPv4', action='store_true') + parser.add_argument('--influx_host') + parser.add_argument('--influx_token', help='Username for your Influx database') + parser.add_argument('--influx_bucket', help='Password for your Influx database') + parser.add_argument('--influx_org', help='Name of your Influx database') + parser.add_argument('--influx_port', help='Port where your influx database is located', default=8086) + parser.add_argument('--influx_tag', action='append', nargs=2, + help='--influx_tag Can add more than one of these.') + 
parser.add_argument('--influx_mgr', + help='IP address of the server your Influx database is hosted if different from your LANforge Manager', + default=None) + parser.add_argument('--create_sta', help='Used to force a connection to a particular AP', default=True) + parser.add_argument('--sta_names', help='Used to force a connection to a particular AP', default="sta0000") + args = parser.parse_args() + create_sta = True + if args.create_sta == "False": + create_sta = False + + num_sta = 2 + if (args.num_stations is not None) and (int(args.num_stations) > 0): + num_sta = int(args.num_stations) + + # Create directory + + # if file path with output file extension is not given... + # check if home/lanforge/report-data exists. if not, save + # in new folder based in current file's directory + + if args.report_file is None: + new_file_path = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-h-%M-m-%S-s")).replace(':', + '-') + '_test_ip_variable_time' # create path name + try: + path = os.path.join('/home/lanforge/report-data/', new_file_path) + os.mkdir(path) + except: + curr_dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(curr_dir_path, new_file_path) + os.mkdir(path) + systeminfopath = str(path) + '/systeminfo.txt' + + if args.output_format in ['csv', 'json', 'html', 'hdf', 'stata', 'pickle', 'pdf', 'png', 'parquet', + 'xlsx']: + report_f = str(path) + '/data.' + args.output_format + output = args.output_format + else: + print( + 'Not supporting this report format or cannot find report format provided. 
Defaulting to csv data file ' + 'output type, naming it data.csv.') + report_f = str(path) + '/data.csv' + output = 'csv' + + else: + systeminfopath = str(args.report_file).split('/')[-1] + report_f = args.report_file + if args.output_format is None: + output = str(args.report_file).split('.')[-1] + else: + output = args.output_format + print("IP Test Report Data: {}".format(report_f)) + + # Retrieve last data file + compared_rept = None + if args.compared_report: + compared_report_format = args.compared_report.split('.')[-1] + # if compared_report_format not in ['csv', 'json', 'dta', 'pkl','html','xlsx','parquet','h5']: + if compared_report_format != 'csv': + print(ValueError("Cannot process this file type. Please select a different file and re-run script.")) + exit(1) + else: + compared_rept = args.compared_report + + if create_sta: + station_list = LFUtils.portNameSeries(prefix_="sta", start_id_=0, end_id_=num_sta - 1, padding_number_=10000, + radio=args.radio) + else: + station_list = args.sta_names.split(",") + + CX_TYPES = ("tcp", "udp", "lf_tcp", "lf_udp") + + if (args.traffic_type is None) or (args.traffic_type not in CX_TYPES): + print("cx_type needs to be lf_tcp, lf_udp, tcp, or udp, bye") + exit(1) + + if args.ipv6: + if args.traffic_type == "tcp" or args.traffic_type == "lf_tcp": + args.traffic_type = "lf_tcp6" + if args.traffic_type == "udp" or args.traffic_type == "lf_udp": + args.traffic_type = "lf_udp6" + else: + if args.traffic_type == "tcp": + args.traffic_type = "lf_tcp" + if args.traffic_type == "udp": + args.traffic_type = "lf_udp" + + ip_var_test = IPVariableTime(host=args.mgr, + port=args.mgr_port, + number_template="0000", + sta_list=station_list, + create_sta=create_sta, + name_prefix="VT", + upstream=args.upstream_port, + ssid=args.ssid, + password=args.passwd, + radio=args.radio, + security=args.security, + test_duration=args.test_duration, + use_ht160=False, + side_a_min_rate=args.a_min, + side_b_min_rate=args.b_min, + mode=args.mode, + 
ap=args.ap, + ipv6=args.ipv6, + traffic_type=args.traffic_type, + _debug_on=args.debug) + + ip_var_test.pre_cleanup() + + ip_var_test.build() + # exit() + if create_sta: + if not ip_var_test.passes(): + print(ip_var_test.get_fail_message()) + ip_var_test.exit_fail() + + try: + layer3connections = ','.join([[*x.keys()][0] for x in ip_var_test.json_get('endp')['endpoint']]) + except: + raise ValueError('Try setting the upstream port flag if your device does not have an eth1 port') + + if type(args.layer3_cols) is not list: + layer3_cols = list(args.layer3_cols.split(",")) + # send col names here to file to reformat + else: + layer3_cols = args.layer3_cols + # send col names here to file to reformat + if type(args.port_mgr_cols) is not list: + port_mgr_cols = list(args.port_mgr_cols.split(",")) + # send col names here to file to reformat + else: + port_mgr_cols = args.port_mgr_cols + # send col names here to file to reformat + if args.debug: + print("Layer 3 Endp column names are...") + print(layer3_cols) + print("Port Manager column names are...") + print(port_mgr_cols) + + print("Layer 3 Endp column names are...") + print(layer3_cols) + print("Port Manager column names are...") + print(port_mgr_cols) + + try: + monitor_interval = Realm.parse_time(args.monitor_interval).total_seconds() + except ValueError as error: + print(str(error)) + print(ValueError( + "The time string provided for monitor_interval argument is invalid. Please see supported time stamp increments and inputs for monitor_interval in --help. 
")) + exit(1) + ip_var_test.start(False, False) + + # if args.influx_mgr is None: + # manager = args.mgr + # else: + # manager = args.influx_mgr + + if args.influx_org is not None: + from influx2 import RecordInflux + grapher = RecordInflux(_influx_host=args.influx_host, + _influx_port=args.influx_port, + _influx_org=args.influx_org, + _influx_token=args.influx_token, + _influx_bucket=args.influx_bucket) + devices = [station.split('.')[-1] for station in station_list] + tags = dict() + tags['script'] = 'test_ip_variable_time' + try: + for k in args.influx_tag: + tags[k[0]] = k[1] + except: + pass + grapher.monitor_port_data(longevity=Realm.parse_time(args.test_duration).total_seconds(), + devices=devices, + monitor_interval=Realm.parse_time(args.monitor_interval).total_seconds(), + tags=tags) + + ip_var_test.cx_profile.monitor(layer3_cols=layer3_cols, + sta_list=station_list, + # port_mgr_cols=port_mgr_cols, + report_file=report_f, + systeminfopath=systeminfopath, + duration_sec=Realm.parse_time(args.test_duration).total_seconds(), + monitor_interval_ms=monitor_interval, + created_cx=layer3connections, + output_format=output, + compared_report=compared_rept, + script_name='test_ip_variable_time', + arguments=args, + debug=args.debug) + + ip_var_test.stop() + if create_sta: + if not ip_var_test.passes(): + print(ip_var_test.get_fail_message()) + ip_var_test.exit_fail() + LFUtils.wait_until_ports_admin_up(port_list=station_list) + + if ip_var_test.passes(): + ip_var_test.success() + ip_var_test.cleanup() + print("IP Variable Time Test Report Data: {}".format(report_f)) + + +if __name__ == "__main__": + main() diff --git a/py-scripts/test_ipv4_ttls.py b/py-scripts/test_ipv4_ttls.py index aa352362..93321560 100755 --- a/py-scripts/test_ipv4_ttls.py +++ b/py-scripts/test_ipv4_ttls.py @@ -17,7 +17,8 @@ from LANforge import LFUtils import realm import time import pprint -from test_ipv4_variable_time import IPV4VariableTime +from test_ip_variable_time import IPVariableTime 
+ class TTLSTest(LFCliBase): def __init__(self, host="localhost", port=8080, @@ -79,11 +80,11 @@ class TTLSTest(LFCliBase): self.key = wep_key self.ca_cert = ca_cert self.eap = eap - self.identity = identity # eap identity + self.identity = identity # eap identity self.anonymous_identity = anonymous_identity self.phase1 = phase1 self.phase2 = phase2 - self.ttls_passwd = ttls_passwd #eap passwd + self.ttls_passwd = ttls_passwd # eap passwd self.pin = pin self.pac_file = pac_file self.private_key = private_key @@ -124,9 +125,9 @@ class TTLSTest(LFCliBase): self.station_profile.mode = 0 # Layer3 Traffic - self.l3_cx_obj_udp = IPV4VariableTime(host=self.host, port=self.port, - create_sta=False, sta_list=self.sta_list, traffic_type="lf_udp", - upstream=self.upstream_port) + self.l3_cx_obj_udp = IPVariableTime(host=self.host, port=self.port, + create_sta=False, sta_list=self.sta_list, traffic_type="lf_udp", + upstream=self.upstream_port) self.l3_cx_obj_udp.cx_profile.name_prefix = "udp-" self.l3_cx_obj_udp.cx_profile.side_a_min_bps = 128000 @@ -137,9 +138,9 @@ class TTLSTest(LFCliBase): self.l3_cx_obj_udp.cx_profile.side_b_min_pdu = 1500 self.l3_cx_obj_udp.cx_profile.report_timer = 1000 - self.l3_cx_obj_tcp = IPV4VariableTime(host=self.host, port=self.port, - create_sta=False, sta_list=self.sta_list, traffic_type="lf_tcp", - upstream=self.upstream_port) + self.l3_cx_obj_tcp = IPVariableTime(host=self.host, port=self.port, + create_sta=False, sta_list=self.sta_list, traffic_type="lf_tcp", + upstream=self.upstream_port) self.l3_cx_obj_tcp.cx_profile.name_prefix = "tcp-" self.l3_cx_obj_tcp.cx_profile.side_a_min_bps = 128000 self.l3_cx_obj_tcp.cx_profile.side_a_max_bps = 128000 @@ -172,7 +173,7 @@ class TTLSTest(LFCliBase): passwd=self.ttls_passwd, realm=self.ttls_realm, domain=self.domain, - hessid=self.hessid ) + hessid=self.hessid) if self.ieee80211w: self.station_profile.set_command_param("add_sta", "ieee80211w", self.ieee80211w) if self.enable_pkc: @@ -241,7 +242,7 @@ 
class TTLSTest(LFCliBase): if (len(sta_list) == len(ip_map)) and (len(sta_list) == len(associated_map)): self._pass("PASS: All stations associated with IP", print_pass) else: - + self._fail("FAIL: Not all stations able to associate/get IP", print_fail) if self.debug: print("sta_list", sta_list) @@ -255,7 +256,6 @@ class TTLSTest(LFCliBase): # please see test_ipv4_variable_time for example of generating traffic return self.passes() - def stop(self): # Bring stations down self.station_profile.admin_down() @@ -313,11 +313,11 @@ class TTLSTest(LFCliBase): else: self._fail("%s did not report traffic: %s" % (name, postVal), print_fail) -def main(): +def main(): parser = LFCliBase.create_basic_argparse( prog='test_ipv4_ttls.py', - #formatter_class=argparse.RawDescriptionHelpFormatter, + # formatter_class=argparse.RawDescriptionHelpFormatter, formatter_class=argparse.RawTextHelpFormatter, epilog='''Demonstration showing wpa2-ent ttls authentication''', @@ -340,25 +340,28 @@ test_ipv4_ttls.py: for agroup in parser._action_groups: if agroup.title == "required arguments": required = agroup - #if required is not None: + # if required is not None: optional = None for agroup in parser._action_groups: if agroup.title == "optional arguments": optional = agroup - + if optional is not None: optional.add_argument('--a_min', help='--a_min bps rate minimum for side_a', default=256000) optional.add_argument('--b_min', help='--b_min bps rate minimum for side_b', default=256000) optional.add_argument('--test_duration', help='--test_duration sets the duration of the test', default="5m") - optional.add_argument('--key-mgmt', help="--key-mgt: { %s }"%", ".join(realm.wpa_ent_list()), default="WPA-EAP") + optional.add_argument('--key-mgmt', help="--key-mgt: { %s }" % ", ".join(realm.wpa_ent_list()), + default="WPA-EAP") optional.add_argument('--wpa_psk', help='wpa-ent pre shared key', default="[BLANK]") optional.add_argument('--eap', help='--eap eap method to use', default="TTLS") 
optional.add_argument('--identity', help='--identity eap identity string', default="testuser") optional.add_argument('--ttls_passwd', help='--ttls_passwd eap password string', default="testpasswd") - optional.add_argument('--ttls_realm', help='--ttls_realm 802.11u home realm to use', default="localhost.localdomain") + optional.add_argument('--ttls_realm', help='--ttls_realm 802.11u home realm to use', + default="localhost.localdomain") optional.add_argument('--domain', help='--domain 802.11 domain to use', default="localhost.localdomain") - optional.add_argument('--hessid', help='--hessid 802.11u HESSID (MAC addr format/peer for WDS)', default="00:00:00:00:00:01") + optional.add_argument('--hessid', help='--hessid 802.11u HESSID (MAC addr format/peer for WDS)', + default="00:00:00:00:00:01") optional.add_argument('--ieee80211w', help='--ieee80211w .1 \ --atten_vals 20,21,40,41 -Sample using upsteam eth1 downstream eth2 - ./test_l3_longevity.py --test_duration 20s --polling_interval 1s --upstream_port eth1 --downstream_port eth2 +Example using upsteam eth1 downstream eth2 + ./test_l3_longevity.py --test_duration 20s --polling_interval 1s --upstream_port eth1 --downstream_port eth2 --endp_type lf --rates_are_totals --side_a_min_bps=10000000,0 --side_a_min_pdu=1000 --side_b_min_bps=0,300000000 --side_b_min_pdu=1000 +COPYRIGHT: +Copyright 2021 Candela Technologies Inc + +INCLUDE_IN_README + ''' import sys @@ -56,35 +65,36 @@ if 'py-json' not in sys.path: sys.path.append(os.path.join(os.path.abspath('..'), 'py-json')) import argparse -#from LANforge.lfcli_base import LFCliBase +# from LANforge.lfcli_base import LFCliBase from LANforge import LFUtils -#import realm +# import realm from realm import Realm import time import datetime import subprocess import csv + # This class handles running the test and generating reports. 
class L3VariableTime(Realm): - def __init__(self, - endp_types, - args, - tos, + def __init__(self, + endp_types, + args, + tos, side_b, - side_a, - radio_name_list, + side_a, + radio_name_list, number_of_stations_per_radio_list, - ssid_list, - ssid_password_list, - ssid_security_list, - station_lists, - name_prefix, + ssid_list, + ssid_password_list, + ssid_security_list, + station_lists, + name_prefix, outfile, reset_port_enable_list, reset_port_time_min_list, reset_port_time_max_list, - side_a_min_rate=[56000], + side_a_min_rate=[56000], side_a_max_rate=[0], side_b_min_rate=[56000], side_b_max_rate=[0], @@ -97,17 +107,20 @@ class L3VariableTime(Realm): mconn=1, attenuators=[], atten_vals=[], - number_template="00", + number_template="00", test_duration="256s", polling_interval="60s", - lfclient_host="localhost", - lfclient_port=8080, + lfclient_host="localhost", + lfclient_port=8080, debug=False, influxdb=None, + ap_scheduler_stats=False, + ap_ofdma_stats=False, ap_read=False, ap_port='/dev/ttyUSB0', ap_baud='115200', ap_cmd='wl -i wl1 bs_data', + ap_chanim_cmd='wl -i wl1 chanim_stats', ap_test_mode=False, _exit_on_error=False, _exit_on_fail=False, @@ -132,7 +145,7 @@ class L3VariableTime(Realm): self.dataplane = False self.ssid_list = ssid_list self.ssid_password_list = ssid_password_list - self.station_lists = station_lists + self.station_lists = station_lists self.ssid_security_list = ssid_security_list self.reset_port_enable_list = reset_port_enable_list self.reset_port_time_min_list = reset_port_time_min_list @@ -141,8 +154,8 @@ class L3VariableTime(Realm): self.name_prefix = name_prefix self.test_duration = test_duration self.radio_name_list = radio_name_list - self.number_of_stations_per_radio_list = number_of_stations_per_radio_list - #self.local_realm = realm.Realm(lfclient_host=self.host, lfclient_port=self.port, debug_=debug_on) + self.number_of_stations_per_radio_list = number_of_stations_per_radio_list + # self.local_realm = 
realm.Realm(lfclient_host=self.host, lfclient_port=self.port, debug_=debug_on) self.polling_interval_seconds = self.duration_time_to_seconds(polling_interval) self.cx_profile = self.new_l3_cx_profile() self.multicast_profile = self.new_multicast_profile() @@ -155,8 +168,6 @@ class L3VariableTime(Realm): self.debug = debug self.mconn = mconn self.user_tags = user_tags - - self.side_a_min_rate = side_a_min_rate self.side_a_max_rate = side_a_max_rate @@ -184,11 +195,20 @@ class L3VariableTime(Realm): self.cx_profile.side_b_min_bps = side_b_min_rate[0] self.cx_profile.side_b_max_bps = side_b_max_rate[0] + self.ap_scheduler_stats = ap_scheduler_stats + self.ap_ofdma_stats = ap_ofdma_stats self.ap_read = ap_read self.ap_port = ap_port self.ap_baud = ap_baud self.ap_cmd = ap_cmd + self.ap_chanim_cmd = ap_chanim_cmd self.ap_test_mode = ap_test_mode + self.ap_5g_umsched = "" + self.ap_5g_msched = "" + self.ap_24g_umsched = "" + self.ap_24g_msched = "" + self.ap_ofdma_5g = "" + self.ap_ofdma_24g = "" # Lookup key is port-eid name self.port_csv_files = {} @@ -196,10 +216,10 @@ class L3VariableTime(Realm): # TODO: cmd-line arg to enable/disable these stats. 
self.ap_stats_col_titles = ["Station Address", "PHY Mbps", "Data Mbps", "Air Use", "Data Use", - "Retries", "bw", "mcs", "Nss", "ofdma", "mu-mimo"] + "Retries", "bw", "mcs", "Nss", "ofdma", "mu-mimo", "channel utilization"] dur = self.duration_time_to_seconds(self.test_duration) - + if (self.polling_interval_seconds > dur + 1): self.polling_interval_seconds = dur - 1 @@ -209,13 +229,13 @@ class L3VariableTime(Realm): kpi = kpi + "-kpi.csv" self.csv_kpi_file = open(kpi, "w") self.csv_kpi_writer = csv.writer(self.csv_kpi_file, delimiter=",") - + # if side_a is None then side_a is radios if self.dataplane == False: - for (radio_, ssid_, ssid_password_, ssid_security_,\ - reset_port_enable_, reset_port_time_min_, reset_port_time_max_) \ - in zip(radio_name_list, ssid_list, ssid_password_list, ssid_security_list,\ - reset_port_enable_list, reset_port_time_min_list, reset_port_time_max_list): + for (radio_, ssid_, ssid_password_, ssid_security_, \ + reset_port_enable_, reset_port_time_min_, reset_port_time_max_) \ + in zip(radio_name_list, ssid_list, ssid_password_list, ssid_security_list, \ + reset_port_enable_list, reset_port_time_min_list, reset_port_time_max_list): self.station_profile = self.new_station_profile() self.station_profile.lfclient_url = self.lfclient_url self.station_profile.ssid = ssid_ @@ -223,22 +243,41 @@ class L3VariableTime(Realm): self.station_profile.security = ssid_security_ self.station_profile.number_template = self.number_template self.station_profile.mode = 0 - self.station_profile.set_reset_extra(reset_port_enable=reset_port_enable_,\ - test_duration=self.duration_time_to_seconds(self.test_duration),\ - reset_port_min_time=self.duration_time_to_seconds(reset_port_time_min_),\ - reset_port_max_time=self.duration_time_to_seconds(reset_port_time_max_)) + self.station_profile.set_reset_extra(reset_port_enable=reset_port_enable_, \ + test_duration=self.duration_time_to_seconds(self.test_duration), \ + 
reset_port_min_time=self.duration_time_to_seconds( + reset_port_time_min_), \ + reset_port_max_time=self.duration_time_to_seconds( + reset_port_time_max_)) self.station_profiles.append(self.station_profile) else: pass - self.multicast_profile.host = self.lfclient_host self.cx_profile.host = self.lfclient_host self.cx_profile.port = self.lfclient_port self.cx_profile.name_prefix = self.name_prefix + def get_ap_5g_umsched(self): + return self.ap_5g_umsched + + def get_ap_5g_msched(self): + return self.ap_5g_msched + + def get_ap_24g_umsched(self): + return self.ap_5g_umsched + + def get_ap_24g_msched(self): + return self.ap_5g_msched + + def get_ap_ofdma_5g(self): + return self.ap_ofdma_5g + + def get_ap_ofdma_24g(self): + return self.ap_ofdma_24g + def get_kpi_csv(self): - #print("self.csv_kpi_file {}".format(self.csv_kpi_file.name)) + # print("self.csv_kpi_file {}".format(self.csv_kpi_file.name)) return self.csv_kpi_file.name # Find avg latency, jitter for connections using specified port. @@ -248,7 +287,7 @@ class L3VariableTime(Realm): tput = 0 count = 0 - #print("endp-stats-for-port, port-eid: {}".format(eid_name)) + # print("endp-stats-for-port, port-eid: {}".format(eid_name)) eid = self.name_to_eid(eid_name) # Convert all eid elements to strings @@ -257,13 +296,13 @@ class L3VariableTime(Realm): eid[2] = str(eid[2]) for e in endps: - #pprint(e) + # pprint(e) eid_endp = e["eid"].split(".") print("Comparing eid: ", eid, " to endp-id: ", eid_endp) # Look through all the endpoints (endps), to find the port the eid_name is using. # The eid_name that has the same Shelf, Resource, and Port as the eid_endp (looking at all the endps) # Then read the eid_endp to get the delay, jitter and rx rate - # Note: the endp eid is shelf.resource.port.endp-id, the eid can be treated somewhat as + # Note: the endp eid is shelf.resource.port.endp-id, the eid can be treated somewhat as # child class of port-eid , and look up the port the eid is using. 
if eid[0] == eid_endp[0] and eid[1] == eid_endp[1] and eid[2] == eid_endp[2]: lat += int(e["delay"]) @@ -303,7 +342,7 @@ class L3VariableTime(Realm): endps.append(value) print("endpoint: ", item, " value:\n") pprint(value) - + for value_name, value in value.items(): if value_name == 'rx bytes': endp_rx_map[item] = value @@ -311,13 +350,13 @@ class L3VariableTime(Realm): endp_rx_drop_map[item] = value if value_name == 'rx rate': # This hack breaks for mcast or if someone names endpoints weirdly. - #print("item: ", item, " rx-bps: ", value_rx_bps) + # print("item: ", item, " rx-bps: ", value_rx_bps) if item.endswith("-A"): total_dl += int(value) else: total_ul += int(value) - #print("total-dl: ", total_dl, " total-ul: ", total_ul, "\n") + # print("total-dl: ", total_dl, " total-ul: ", total_ul, "\n") return endp_rx_map, endp_rx_drop_map, endps, total_dl, total_ul # Common code to generate timestamp for CSV files. @@ -350,7 +389,7 @@ class L3VariableTime(Realm): def gather_port_eids(self): rv = [self.side_b] - + for station_profile in self.station_profiles: rv = rv + station_profile.station_names @@ -370,20 +409,21 @@ class L3VariableTime(Realm): # stations, so allow skipping it. 
# Do clean cx lists so that when we re-apply them we get same endp name # as we had previously - #print("rebuild: Clearing cx profile lists.\n") + # print("rebuild: Clearing cx profile lists.\n") self.cx_profile.clean_cx_lists() self.multicast_profile.clean_mc_lists() if self.dataplane: for etype in self.endp_types: for _tos in self.tos: - print("Creating connections for endpoint type: %s TOS: %s cx-count: %s"%(etype, _tos, self.cx_profile.get_cx_count())) + print("Creating connections for endpoint type: %s TOS: %s cx-count: %s" % ( + etype, _tos, self.cx_profile.get_cx_count())) # use brackes on [self.side_a] to make it a list these_cx, these_endp = self.cx_profile.create(endp_type=etype, side_a=[self.side_a], - side_b=self.side_b, sleep_time=0, tos=_tos) + side_b=self.side_b, sleep_time=0, tos=_tos) if (etype == "lf_udp" or etype == "lf_udp6"): self.udp_endps = self.udp_endps + these_endp - elif(etype=="lf"): + elif (etype == "lf"): self.lf_endps = self.eth_endps + these_endp else: self.tcp_endps = self.tcp_endps + these_endp @@ -391,11 +431,13 @@ class L3VariableTime(Realm): else: for station_profile in self.station_profiles: if not rebuild: - station_profile.use_security(station_profile.security, station_profile.ssid, station_profile.ssid_pass) + station_profile.use_security(station_profile.security, station_profile.ssid, + station_profile.ssid_pass) station_profile.set_number_template(station_profile.number_template) - print("Creating stations on radio %s"%(self.radio_name_list[index])) + print("Creating stations on radio %s" % (self.radio_name_list[index])) - station_profile.create(radio=self.radio_name_list[index], sta_names_=self.station_lists[index], debug=self.debug, sleep_time=0) + station_profile.create(radio=self.radio_name_list[index], sta_names_=self.station_lists[index], + debug=self.debug, sleep_time=0) index += 1 self.station_count += len(station_profile.station_names) @@ -403,14 +445,16 @@ class L3VariableTime(Realm): # Build/update connection 
types for etype in self.endp_types: if etype == "mc_udp" or etype == "mc_udp6": - print("Creating Multicast connections for endpoint type: %s"%(etype)) + print("Creating Multicast connections for endpoint type: %s" % (etype)) self.multicast_profile.create_mc_tx(etype, self.side_b, etype) self.multicast_profile.create_mc_rx(etype, side_rx=station_profile.station_names) else: for _tos in self.tos: - print("Creating connections for endpoint type: %s TOS: %s cx-count: %s"%(etype, _tos, self.cx_profile.get_cx_count())) - these_cx, these_endp = self.cx_profile.create(endp_type=etype, side_a=station_profile.station_names, - side_b=self.side_b, sleep_time=0, tos=_tos) + print("Creating connections for endpoint type: %s TOS: %s cx-count: %s" % ( + etype, _tos, self.cx_profile.get_cx_count())) + these_cx, these_endp = self.cx_profile.create(endp_type=etype, + side_a=station_profile.station_names, + side_b=self.side_b, sleep_time=0, tos=_tos) if (etype == "lf_udp" or etype == "lf_udp6"): self.udp_endps = self.udp_endps + these_endp; else: @@ -419,9 +463,25 @@ class L3VariableTime(Realm): self.cx_count = self.cx_profile.get_cx_count() if self.dataplane == True: - self._pass("PASS: CX build finished: created/updated: %s connections."%(self.cx_count)) + self._pass("PASS: CX build finished: created/updated: %s connections." % (self.cx_count)) else: - self._pass("PASS: Stations & CX build finished: created/updated: %s stations and %s connections."%(self.station_count, self.cx_count)) + self._pass("PASS: Stations & CX build finished: created/updated: %s stations and %s connections." 
% ( + self.station_count, self.cx_count)) + + def ap_custom_cmd(self, ap_custom_cmd): + ap_results = "" + try: + # configure the serial interface + ser = serial.Serial(self.ap_port, int(self.ap_baud), timeout=5) + ss = SerialSpawn(ser) + ss.sendline(str(ap_custom_cmd)) + ss.expect([pexpect.TIMEOUT], timeout=1) # do not detete line, waits for output + ap_results = ss.before.decode('utf-8', 'ignore') + print("ap_custom_cmd: {} ap_results {}".format(ap_custom_cmd, ap_results)) + except: + print("ap_custom_cmd: {} WARNING unable to read AP ".format(ap_custom_cmd)) + + return ap_results def read_ap_stats(self): # 5ghz: wl -i wl1 bs_data 2.4ghz# wl -i wl0 bs_data @@ -431,22 +491,39 @@ class L3VariableTime(Realm): ser = serial.Serial(self.ap_port, int(self.ap_baud), timeout=5) ss = SerialSpawn(ser) ss.sendline(str(self.ap_cmd)) - ss.expect([pexpect.TIMEOUT], timeout=1) # do not detete line, waits for output - ap_stats = ss.before.decode('utf-8','ignore') + ss.expect([pexpect.TIMEOUT], timeout=1) # do not detete line, waits for output + ap_stats = ss.before.decode('utf-8', 'ignore') print("ap_stats {}".format(ap_stats)) except: print("WARNING unable to read AP") - + return ap_stats + def read_ap_chanim_stats(self): + # 5ghz: wl -i wl1 chanim_stats 2.4ghz# wl -i wl0 chanim_stats + ap_chanim_stats = "" + try: + # configure the serial interface + ser = serial.Serial(self.ap_port, int(self.ap_baud), timeout=5) + ss = SerialSpawn(ser) + ss.sendline(str(self.ap_chanim_cmd)) + ss.expect([pexpect.TIMEOUT], timeout=1) # do not detete line, waits for output + ap_chanim_stats = ss.before.decode('utf-8', 'ignore') + print("ap_stats {}".format(ap_chanim_stats)) + + except: + print("WARNING unable to read AP") + + return ap_chanim_stats + # Run the main body of the test logic. 
def start(self, print_pass=False, print_fail=False): print("Bringing up stations") - self.admin_up(self.side_b) + self.admin_up(self.side_b) for station_profile in self.station_profiles: for sta in station_profile.station_names: - print("Bringing up station %s"%(sta)) + print("Bringing up station %s" % (sta)) self.admin_up(sta) temp_stations_list = [] @@ -461,7 +538,7 @@ class L3VariableTime(Realm): print("print failed to get IP's") csv_header = self.csv_generate_column_headers() - #print(csv_header) + # print(csv_header) self.csv_add_column_headers(csv_header) port_eids = self.gather_port_eids() for eid_name in port_eids: @@ -495,8 +572,9 @@ class L3VariableTime(Realm): if (dl_pdu == "AUTO" or dl_pdu == "MTU"): dl_pdu = "-1" - print("ul: %s dl: %s cx-count: %s rates-are-totals: %s\n"%(ul, dl, self.cx_count, self.rates_are_totals)) - + print("ul: %s dl: %s cx-count: %s rates-are-totals: %s\n" % ( + ul, dl, self.cx_count, self.rates_are_totals)) + # Set rate and pdu size config self.cx_profile.side_a_min_bps = ul self.cx_profile.side_a_max_bps = ul @@ -511,6 +589,10 @@ class L3VariableTime(Realm): # Update connections with the new rate and pdu size config. self.build(rebuild=True) + if self.ap_scheduler_stats or self.ap_ofdma_stats: + self.ap_custom_cmd('wl -i wl1 dump_clear') + self.ap_custom_cmd('wl -i wl0 dump_clear') + for atten_val in self.atten_vals: if atten_val != -1: for atten_idx in self.attenuators: @@ -529,7 +611,7 @@ class L3VariableTime(Realm): end_time = self.parse_time(self.test_duration) + cur_time - print("Monitoring throughput for duration: %s"%(self.test_duration)) + print("Monitoring throughput for duration: %s" % (self.test_duration)) # Monitor test for the interval duration. 
passes = 0 @@ -540,12 +622,10 @@ class L3VariableTime(Realm): ap_row = [] ap_stats_col_titles = [] - - while cur_time < end_time: - #interval_time = cur_time + datetime.timedelta(seconds=5) + # interval_time = cur_time + datetime.timedelta(seconds=5) interval_time = cur_time + datetime.timedelta(seconds=self.polling_interval_seconds) - #print("polling_interval_seconds {}".format(self.polling_interval_seconds)) + # print("polling_interval_seconds {}".format(self.polling_interval_seconds)) while cur_time < interval_time: cur_time = datetime.datetime.now() @@ -554,66 +634,113 @@ class L3VariableTime(Realm): self.epoch_time = int(time.time()) new_rx_values, rx_drop_percent, endps, total_dl_bps, total_ul_bps = self.__get_rx_values() - #print("main loop, total-dl: ", total_dl_bps, " total-ul: ", total_ul_bps) + # print("main loop, total-dl: ", total_dl_bps, " total-ul: ", total_ul_bps) # AP OUTPUT if self.ap_read: if self.ap_test_mode: # Create the test data as a continuous string - ap_stats="{}{}{}{}{}{}".format("root@Docsis-Gateway:~# wl -i wl1 bs_data\n", - "Station Address PHY Mbps Data Mbps Air Use Data Use Retries bw mcs Nss ofdma mu-mimo\n", - "04:f0:21:82:2f:d6 1016.6 48.9 6.5% 24.4% 16.6% 80 9.7 2 0.0% 0.0%\n", - "50:E0:85:84:7A:E7 880.9 52.2 7.7% 26.1% 20.0% 80 8.5 2 0.0% 0.0%\n", - "50:E0:85:89:5D:00 840.0 47.6 6.4% 23.8% 2.3% 80 8.0 2 0.0% 0.0%\n", - "50:E0:85:87:5B:F4 960.7 51.5 5.9% 25.7% 0.0% 80 9 2 0.0% 0.0%\n", - "- note the MAC will match ap_stats.append((overall) - 200.2 26.5% - - \n") + ap_stats = "{}{}{}{}{}{}".format("root@Docsis-Gateway:~# wl -i wl1 bs_data\n", + "Station Address PHY Mbps Data Mbps Air Use Data Use Retries bw mcs Nss ofdma mu-mimo\n", + "04:f0:21:82:2f:d6 1016.6 48.9 6.5% 24.4% 16.6% 80 9.7 2 0.0% 0.0%\n", + "50:E0:85:84:7A:E7 880.9 52.2 7.7% 26.1% 20.0% 80 8.5 2 0.0% 0.0%\n", + "50:E0:85:89:5D:00 840.0 47.6 6.4% 23.8% 2.3% 80 8.0 2 0.0% 0.0%\n", + "50:E0:85:87:5B:F4 960.7 51.5 5.9% 25.7% 0.0% 80 9 2 0.0% 0.0%\n", + "- note the MAC 
will match ap_stats.append((overall) - 200.2 26.5% - - \n") print("ap_stats {}".format(ap_stats)) - # read from the AP + + # Create the test data as a continuous string + ap_chanim_stats = "{}{}{}{}".format("root@Docsis-Gateway:~# wl -i wl1 chanim_stats\n", + "version: 3\n", + "chanspec tx inbss obss nocat nopkt doze txop goodtx badtx glitch badplcp knoise idle timestamp\n", + "0xe06a 61 15 0 17 0 0 6 53 2 0 0 -91 65 343370578\n") else: + # read from the AP ap_stats = self.read_ap_stats() - - #ap_stats_rows = [] # Array of Arrays - + ap_chanim_stats = self.read_ap_chanim_stats() + ap_stats_rows = ap_stats.splitlines() - print("ap_stats_rows {}".format(ap_stats_rows)) - + print("From AP stats: ap_stats_rows {}".format(ap_stats_rows)) + + ap_chanim_stats_rows = ap_chanim_stats.splitlines() + print("From AP chanim: ap_chanim_stats_rows {}".format(ap_chanim_stats_rows)) + channel_utilization = 0 + # Query all of our ports # Note: the endp eid is the shelf.resource.port.endp-id port_eids = self.gather_port_eids() for eid_name in port_eids: eid = self.name_to_eid(eid_name) - url = "/port/%s/%s/%s"%(eid[0], eid[1], eid[2]) + url = "/port/%s/%s/%s" % (eid[0], eid[1], eid[2]) + # read LANforge to get the mac response = self.json_get(url) if (response is None) or ("interface" not in response): - print("query-port: %s: incomplete response:"%(url)) + print("query-port: %s: incomplete response:" % (url)) pprint(response) else: # print("response".format(response)) # pprint(response) p = response['interface'] - #print("#### p, response['insterface']:{}".format(p)) + # print("#### From LANforge: p, response['insterface']:{}".format(p)) mac = p['mac'] - + # print("#### From LANforge: p['mac']: {mac}".format(mac=mac)) + + # Parse the ap stats to find the matching mac then use that row for reporting for row in ap_stats_rows: split_row = row.split() - #print("split_row {}".format(split_row)) - #print("split_row[0] {} mac {}".format(split_row[0].lower(),mac.lower())) + # 
print("split_row {}".format(split_row)) + # print("split_row[0] {} mac {}".format(split_row[0].lower(),mac.lower())) if self.ap_test_mode: if split_row[0].lower() != mac.lower(): ap_row = split_row else: - if split_row[0].lower() == mac.lower(): - ap_row = split_row + try: + # split_row[0].lower() , mac from AP + # mac.lower() , mac from LANforge + if split_row[0].lower() == mac.lower(): + ap_row = split_row + except: + print(" 'No stations are currently associated.'? from AP") + print( + " since possibly no stations: excption on compare split_row[0].lower() ") print("selected ap_row (from split_row): {}".format(ap_row)) - - + # Find latency, jitter for connections using this port. latency, jitter, tput = self.get_endp_stats_for_port(p["port"], endps) - - ap_stats_col_titles = ['Station Address','PHY Mbps','Data Mbps','Air Use','Data Use','Retries','bw','mcs','Nss','ofdma','mu-mimo'] - - self.write_port_csv(len(temp_stations_list), ul, dl, ul_pdu_str, dl_pdu_str, atten_val, eid_name, p, - latency, jitter, tput, ap_row, ap_stats_col_titles) #ap_stats_col_titles used as a length + + # now report the ap_chanim_stats along side of the ap_stats + xtop_reported = False + for row in ap_chanim_stats_rows: + split_row = row.split() + if xtop_reported: + try: + xtop = split_row[7] + channel_utilization = 100 - int(xtop) + except: + print( + "detected chanspec with reading chanim_stats, failed reading xtop") + # should be only one channel utilization + break + else: + try: + if split_row[0].lower() == 'chanspec': + xtop_reported = True + except: + print("Error reading xtop") + # ap information is passed with ap_row so all information needs to be contained in ap_row + ap_row.append(str(channel_utilization)) + print("channel_utilization {channel_utilization}".format( + channel_utilization=channel_utilization)) + print("ap_row {ap_row}".format(ap_row=ap_row)) + + ap_stats_col_titles = ['Station Address', 'PHY Mbps', 'Data Mbps', 'Air Use', + 'Data Use', 'Retries', 'bw', 'mcs', 
'Nss', 'ofdma', + 'mu-mimo', 'channel_utilization'] + + self.write_port_csv(len(temp_stations_list), ul, dl, ul_pdu_str, dl_pdu_str, + atten_val, eid_name, p, + latency, jitter, tput, ap_row, + ap_stats_col_titles) # ap_stats_col_titles used as a length else: # Query all of our ports @@ -621,21 +748,37 @@ class L3VariableTime(Realm): port_eids = self.gather_port_eids() for eid_name in port_eids: eid = self.name_to_eid(eid_name) - url = "/port/%s/%s/%s"%(eid[0], eid[1], eid[2]) + url = "/port/%s/%s/%s" % (eid[0], eid[1], eid[2]) response = self.json_get(url) if (response is None) or ("interface" not in response): - print("query-port: %s: incomplete response:"%(url)) + print("query-port: %s: incomplete response:" % (url)) pprint(response) else: p = response['interface'] latency, jitter, tput = self.get_endp_stats_for_port(p["port"], endps) - - self.write_port_csv(len(temp_stations_list), ul, dl, ul_pdu_str, dl_pdu_str, atten_val, eid_name, p, - latency, jitter, tput, ap_row, ap_stats_col_titles) #ap_stats_col_titles used as a length + self.write_port_csv(len(temp_stations_list), ul, dl, ul_pdu_str, dl_pdu_str, + atten_val, eid_name, p, + latency, jitter, tput, ap_row, + ap_stats_col_titles) # ap_stats_col_titles used as a length # At end of test step, record KPI information. 
- self.record_kpi(len(temp_stations_list), ul, dl, ul_pdu_str, dl_pdu_str, atten_val, total_dl_bps, total_ul_bps) + self.record_kpi(len(temp_stations_list), ul, dl, ul_pdu_str, dl_pdu_str, atten_val, total_dl_bps, + total_ul_bps) + + # At end of test if requested store upload and download stats + if self.ap_scheduler_stats: + # get the (UL) Upload scheduler statistics + self.ap_umsched += self.ap_custom_cmd('wl -i wl1 dump umsched') + # get the (DL) Download schduler staticstics + self.ap_msched += self.ap_custom_cmd('wl -i wl1 dump msched') + + if self.ap_ofdma_stats: + # provide OFDMA stats 5GHz + self.ap_ofdma_5g += self.ap_custom_cmd('wl -i wl1 muinfo -v') + + # provide OFDMA stats 2.4GHz + self.ap_ofdma_24g += self.ap_custom_cmd('wl -i wl0 muinfo -v') # Stop connections. self.cx_profile.stop_cx(); @@ -644,7 +787,8 @@ class L3VariableTime(Realm): cur_time = datetime.datetime.now() if passes == expected_passes: - self._pass("PASS: Requested-Rate: %s <-> %s PDU: %s <-> %s All tests passed" % (ul, dl, ul_pdu, dl_pdu), print_pass) + self._pass("PASS: Requested-Rate: %s <-> %s PDU: %s <-> %s All tests passed" % ( + ul, dl, ul_pdu, dl_pdu), print_pass) def write_port_csv(self, sta_count, ul, dl, ul_pdu, dl_pdu, atten, eid_name, port_data, latency, jitter, tput, ap_row, ap_stats_col_titles): @@ -656,21 +800,20 @@ class L3VariableTime(Realm): row = row + [port_data['bps rx'], port_data['bps tx'], port_data['rx-rate'], port_data['tx-rate'], port_data['signal'], port_data['ap'], port_data['mode'], latency, jitter, tput] - #Add in info queried from AP. NOTE: do not need to pass in the ap_stats_col_titles - #print("ap_row length {} col_titles length {}".format(len(ap_row),len(self.ap_stats_col_titles))) - #print("self.ap_stats_col_titles {} ap_stats_col_titles {}".format(self.ap_stats_col_titles,ap_stats_col_titles)) + # Add in info queried from AP. 
NOTE: do not need to pass in the ap_stats_col_titles + # print("ap_row length {} col_titles length {}".format(len(ap_row),len(self.ap_stats_col_titles))) + # print("self.ap_stats_col_titles {} ap_stats_col_titles {}".format(self.ap_stats_col_titles,ap_stats_col_titles)) if len(ap_row) == len(self.ap_stats_col_titles): i = 0 - #print("ap_row {}".format(ap_row)) + # print("ap_row {}".format(ap_row)) for col in ap_row: - #print("col {}".format(col)) + # print("col {}".format(col)) row.append(col) writer = self.port_csv_writers[eid_name] writer.writerow(row) self.port_csv_files[eid_name].flush() - # Submit data to the influx db if configured to do so. def record_kpi(self, sta_count, ul, dl, ul_pdu, dl_pdu, atten, total_dl_bps, total_ul_bps): @@ -689,7 +832,8 @@ class L3VariableTime(Realm): now = str(datetime.datetime.utcnow().isoformat()) - print("NOTE: Adding kpi to influx, total-download-bps: %s upload: %s bi-directional: %s\n"%(total_dl_bps, total_ul_bps, (total_ul_bps + total_dl_bps))) + print("NOTE: Adding kpi to influx, total-download-bps: %s upload: %s bi-directional: %s\n" % ( + total_dl_bps, total_ul_bps, (total_ul_bps + total_dl_bps))) if self.influxdb is not None: self.influxdb.post_to_influx("total-download-bps", total_dl_bps, tags, now) @@ -725,17 +869,17 @@ class L3VariableTime(Realm): station_profile.cleanup() def csv_generate_column_headers(self): - csv_rx_headers = ['Time epoch','Time','Monitor', - 'UL-Min-Requested','UL-Max-Requested','DL-Min-Requested','DL-Max-Requested', - 'UL-Min-PDU','UL-Max-PDU','DL-Min-PDU','DL-Max-PDU', + csv_rx_headers = ['Time epoch', 'Time', 'Monitor', + 'UL-Min-Requested', 'UL-Max-Requested', 'DL-Min-Requested', 'DL-Max-Requested', + 'UL-Min-PDU', 'UL-Max-PDU', 'DL-Min-PDU', 'DL-Max-PDU', ] csv_rx_headers.append("average_rx_data_bytes") return csv_rx_headers def csv_generate_port_column_headers(self): csv_rx_headers = ['Time epoch', 'Time', 'Station-Count', - 
'UL-Min-Requested','UL-Max-Requested','DL-Min-Requested','DL-Max-Requested', - 'UL-Min-PDU','UL-Max-PDU','DL-Min-PDU','DL-Max-PDU','Attenuation', + 'UL-Min-Requested', 'UL-Max-Requested', 'DL-Min-Requested', 'DL-Max-Requested', + 'UL-Min-PDU', 'UL-Max-PDU', 'DL-Min-PDU', 'DL-Max-PDU', 'Attenuation', 'Name', 'Rx-Bps', 'Tx-Bps', 'Rx-Link-Rate', 'Tx-Link-Rate', 'RSSI', 'AP', 'Mode', 'Rx-Latency', 'Rx-Jitter', 'Rx-Goodput-Bps' ] @@ -747,8 +891,8 @@ class L3VariableTime(Realm): def csv_generate_kpi_column_headers(self): csv_rx_headers = ['Time epoch', 'Time', 'Station-Count', - 'UL-Min-Requested','UL-Max-Requested','DL-Min-Requested','DL-Max-Requested', - 'UL-Min-PDU','UL-Max-PDU','DL-Min-PDU','DL-Max-PDU','Attenuation', + 'UL-Min-Requested', 'UL-Max-Requested', 'DL-Min-Requested', 'DL-Max-Requested', + 'UL-Min-PDU', 'UL-Max-PDU', 'DL-Min-PDU', 'DL-Max-PDU', 'Attenuation', 'Total-Download-Bps', 'Total-Upload-Bps', 'Total-UL/DL-Bps' ] for k in self.user_tags: @@ -757,43 +901,45 @@ class L3VariableTime(Realm): return csv_rx_headers # Write initial headers to csv file. - def csv_add_column_headers(self,headers): + def csv_add_column_headers(self, headers): if self.csv_kpi_file is not None: self.csv_kpi_writer.writerow(self.csv_generate_kpi_column_headers()) self.csv_kpi_file.flush() # Write initial headers to port csv file. 
def csv_add_port_column_headers(self, eid_name, headers): - #if self.csv_file is not None: + # if self.csv_file is not None: fname = self.outfile[:-4] # Strip '.csv' from file name fname = fname + "-" + eid_name + ".csv" pfile = open(fname, "w") port_csv_writer = csv.writer(pfile, delimiter=",") self.port_csv_files[eid_name] = pfile self.port_csv_writers[eid_name] = port_csv_writer - + port_csv_writer.writerow(headers) pfile.flush() def csv_validate_list(self, csv_list, length): if len(csv_list) < length: - csv_list = csv_list + [('no data','no data')] * (length - len(csv_list)) + csv_list = csv_list + [('no data', 'no data')] * (length - len(csv_list)) return csv_list - def csv_add_row(self,row,writer,csv_file): + def csv_add_row(self, row, writer, csv_file): if csv_file is not None: writer.writerow(row) csv_file.flush() # End of the main class. + # Check some input values. def valid_endp_types(_endp_type): etypes = _endp_type.split(',') for endp_type in etypes: - valid_endp_type=['lf','lf_udp','lf_udp6','lf_tcp','lf_tcp6','mc_udp','mc_udp6'] + valid_endp_type = ['lf', 'lf_udp', 'lf_udp6', 'lf_tcp', 'lf_tcp6', 'mc_udp', 'mc_udp6'] if not (str(endp_type) in valid_endp_type): - print('invalid endp_type: %s. Valid types lf, lf_udp, lf_udp6, lf_tcp, lf_tcp6, mc_udp, mc_udp6' % endp_type) + print( + 'invalid endp_type: %s. Valid types lf, lf_udp, lf_udp6, lf_tcp, lf_tcp6, mc_udp, mc_udp6' % endp_type) exit(1) return _endp_type @@ -807,7 +953,7 @@ def main(): parser = argparse.ArgumentParser( prog='test_l3_longevity.py', - #formatter_class=argparse.RawDescriptionHelpFormatter, + # formatter_class=argparse.RawDescriptionHelpFormatter, formatter_class=argparse.RawTextHelpFormatter, epilog='''\ Useful Information: @@ -816,7 +962,7 @@ def main(): 3. The tx/rx rates are fixed at 256000 bits per second 4. 
Maximum stations per radio based on radio ''', - + description='''\ test_l3_longevity.py: -------------------- @@ -888,67 +1034,109 @@ python3 .\\test_l3_longevity.py --test_duration 4m --endp_type \"lf_tcp lf_udp m ''') - parser.add_argument('--tty', help='--tty \"/dev/ttyUSB2\" the serial interface to the AP') - parser.add_argument('--baud', help='--baud \"9600\" baud rate for the serial interface',default="9600") - parser.add_argument('--amount_ports_to_reset', help='--amount_ports_to_reset \" \" ', default=None) - parser.add_argument('--port_reset_seconds', help='--ports_reset_seconds \" \" ', default="10 30") + parser.add_argument('--local_lf_report_dir', + help='--local_lf_report_dir override the report path, primary use when running test in test suite', + default="") + parser.add_argument('-o', '--csv_outfile', help="--csv_outfile ", default="") - parser.add_argument('--mgr', help='--mgr ',default='localhost') - parser.add_argument('--test_duration', help='--test_duration example --time 5d (5 days) default: 3m options: number followed by d, h, m or s',default='3m') - parser.add_argument('--tos', help='--tos: Support different ToS settings: BK | BE | VI | VO | numeric',default="BE") - parser.add_argument('--debug', help='--debug flag present debug on enable debugging',action='store_true') - parser.add_argument('-t', '--endp_type', help='--endp_type example --endp_type \"lf_udp lf_tcp mc_udp\" Default: lf_udp , options: lf_udp, lf_udp6, lf_tcp, lf_tcp6, mc_udp, mc_udp6', + parser.add_argument('--tty', help='--tty \"/dev/ttyUSB2\" the serial interface to the AP', default="") + parser.add_argument('--baud', help='--baud \"9600\" AP baud rate for the serial interface', default="9600") + parser.add_argument('--amount_ports_to_reset', + help='--amount_ports_to_reset \" \" ', default=None) + parser.add_argument('--port_reset_seconds', help='--ports_reset_seconds \" \" ', + default="10 30") + + parser.add_argument('--mgr', help='--mgr ', default='localhost') + 
parser.add_argument('--test_duration', + help='--test_duration example --time 5d (5 days) default: 3m options: number followed by d, h, m or s', + default='3m') + parser.add_argument('--tos', help='--tos: Support different ToS settings: BK | BE | VI | VO | numeric', + default="BE") + parser.add_argument('--debug', help='--debug flag present debug on enable debugging', action='store_true') + parser.add_argument('-t', '--endp_type', + help='--endp_type example --endp_type \"lf_udp lf_tcp mc_udp\" Default: lf_udp , options: lf_udp, lf_udp6, lf_tcp, lf_tcp6, mc_udp, mc_udp6', default='lf_udp', type=valid_endp_types) - parser.add_argument('-u', '--upstream_port', help='--upstream_port example: --upstream_port eth1',default='eth1') - parser.add_argument('--downstream_port', help='--downstream_port example: --downstream_port eth2',default='eth2') - parser.add_argument('-o','--csv_outfile', help="--csv_outfile ", default="") + parser.add_argument('-u', '--upstream_port', + help='--upstream_port example: --upstream_port eth1', + default='eth1') + parser.add_argument('--downstream_port', + help='--downstream_port example: --downstream_port eth2') parser.add_argument('--polling_interval', help="--polling_interval ", default='60s') - parser.add_argument('-r','--radio', action='append', nargs=1, help='--radio \ + parser.add_argument('-r', '--radio', action='append', nargs=1, help='--radio \ \"radio== ssid== ssid_pw== security==\" ') parser.add_argument('--ap_read', help='--ap_read flag present enable reading ap', action='store_true') - parser.add_argument('--ap_port', help='--ap_port \'/dev/ttyUSB0\'',default='/dev/ttyUSB0') - parser.add_argument('--ap_baud', help='--ap_baud \'115200\'',default='115200') + parser.add_argument('--ap_port', help='--ap_port \'/dev/ttyUSB0\'', default='/dev/ttyUSB0') + parser.add_argument('--ap_baud', help='--ap_baud \'115200\'', default='115200') parser.add_argument('--ap_cmd', help='ap_cmd \'wl -i wl1 bs_data\'', default="wl -i wl1 bs_data") + 
parser.add_argument('--ap_chanim_cmd', help='ap_chanim_cmd \'wl -i wl1 chanim_stats\'', + default="wl -i wl1 chanim_stats") + parser.add_argument('--ap_scheduler_stats', + help='--ap_scheduler_stats flag to clear stats run test then dump ul and dl stats to file', + action='store_true') + parser.add_argument('--ap_ofdma_stats', + help='--ap_ofdma_stats flag to clear stats run test then dumps wl -i wl1 muinfo -v and wl -i wl0 muinfo -v to file', + action='store_true') parser.add_argument('--ap_test_mode', help='ap_test_mode flag present use ap canned data', action='store_true') - parser.add_argument('-tty', help='-tty serial interface to AP -tty \"/dev/ttyUSB2\"',default="") - parser.add_argument('-baud', help='-baud serial interface baud rate to AP -baud ',default='9600') - - parser.add_argument('-amr','--side_a_min_bps', - help='--side_a_min_bps, requested downstream min tx rate, comma separated list for multiple iterations. Default 256k', default="256000") - parser.add_argument('-amp','--side_a_min_pdu', - help='--side_a_min_pdu, downstream pdu size, comma separated list for multiple iterations. Default MTU', default="MTU") - parser.add_argument('-bmr','--side_b_min_bps', - help='--side_b_min_bps, requested upstream min tx rate, comma separated list for multiple iterations. Default 256000', default="256000") - parser.add_argument('-bmp','--side_b_min_pdu', - help='--side_b_min_pdu, upstream pdu size, comma separated list for multiple iterations. Default MTU', default="MTU") + parser.add_argument('-amr', '--side_a_min_bps', + help='--side_a_min_bps, requested downstream min tx rate, comma separated list for multiple iterations. Default 256k', + default="256000") + parser.add_argument('-amp', '--side_a_min_pdu', + help='--side_a_min_pdu, downstream pdu size, comma separated list for multiple iterations. 
Default MTU', + default="MTU") + parser.add_argument('-bmr', '--side_b_min_bps', + help='--side_b_min_bps, requested upstream min tx rate, comma separated list for multiple iterations. Default 256000', + default="256000") + parser.add_argument('-bmp', '--side_b_min_pdu', + help='--side_b_min_pdu, upstream pdu size, comma separated list for multiple iterations. Default MTU', + default="MTU") parser.add_argument("--rates_are_totals", default=False, - help="Treat configured rates as totals instead of using the un-modified rate for every connection.", action='store_true') + help="Treat configured rates as totals instead of using the un-modified rate for every connection.", + action='store_true') parser.add_argument("--multiconn", default=1, help="Configure multi-conn setting for endpoints. Default is 1 (auto-helper is enabled by default as well).") - parser.add_argument('--attenuators', help='--attenuators, comma separated list of attenuator module eids: shelf.resource.atten-serno.atten-idx', default="") - parser.add_argument('--atten_vals', help='--atten_vals, comma separated list of attenuator settings in ddb units (1/10 of db)', default="") + parser.add_argument('--attenuators', + help='--attenuators, comma separated list of attenuator module eids: shelf.resource.atten-serno.atten-idx', + default="") + parser.add_argument('--atten_vals', + help='--atten_vals, comma separated list of attenuator settings in ddb units (1/10 of db)', + default="") influx_add_parser_args(parser) - parser.add_argument("--cap_ctl_out", help="--cap_ctl_out, switch the controller output will be captured", action='store_true') - parser.add_argument("--wait", help="--wait