mirror of https://github.com/Telecominfraproject/wlan-lanforge-scripts.git (synced 2025-11-01 03:07:56 +00:00)
Better CSV reading function
Signed-off-by: Matthew Stidham <stidmatt@gmail.com>
@@ -44,48 +44,35 @@ class CSVtoInflux():
        self.target_csv = target_csv.replace('/home/lanforge/html-reports/', '')
        self.influx_tag = _influx_tag

    def read_csv(self, file):
        csv = open(file).read().split('\n')
        rows = list()
        for x in csv:
            if len(x) > 0:
                rows.append(x.split('\t'))
        return rows
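For context, a minimal sketch (not part of the commit) of what read_csv() returns for a tab-separated kpi file; the column names mirror the ones used further down, and the values are invented:

    # Illustrative only: the same splitting logic as read_csv(), applied to an
    # in-memory string instead of a file path.
    sample = 'test-id\tnumeric-score\tDate\n' \
             'WiFi Capacity\t171.085494\t1618427044902\n'
    rows = [r.split('\t') for r in sample.split('\n') if len(r) > 0]
    # rows[0] -> ['test-id', 'numeric-score', 'Date']             (header row)
    # rows[1] -> ['WiFi Capacity', '171.085494', '1618427044902'] (data row)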

    # Submit data to the influx db if configured to do so.
    def post_to_influx(self):
        with open(self.target_csv) as fp:
            line = fp.readline()
            line = line.split('\t')
            # indexes tell us where in the CSV our data is located. We do it this way so that
            # even if the columns are moved around, as long as they are present, the script will still work.
            numeric_score_index = line.index('numeric-score')
            test_id_index = line.index('test-id')
            date_index = line.index('Date')
            test_details_index = line.index('test details')
            short_description_index = line.index('short-description')
            graph_group_index = line.index('Graph-Group')
            units_index = line.index('Units')
            testbed_index = line.index('test-rig')
            duthwversion = line.index('dut-hw-version')
            dutswversion = line.index('dut-sw-version')
            dutserialnum = line.index('dut-serial-num')
            line = fp.readline()
            while line:
                line = line.split('\t')  # split the line by tabs to separate each item in the string
                date = line[date_index]
                date = datetime.datetime.utcfromtimestamp(int(date) / 1000).isoformat()  # convert to datetime so influx can read it, this is required
                numeric_score = line[numeric_score_index]
                numeric_score = float(numeric_score)  # convert to float, InfluxDB cannot
                short_description = line[short_description_index]
                tags = dict()
                tags['script'] = line[test_id_index]
                tags['short-description'] = line[short_description_index]
                tags['test_details'] = line[test_details_index]
                tags['Graph-Group'] = line[graph_group_index]
                tags['DUT-HW-version'] = line[duthwversion]
                tags['DUT-SW-version'] = line[dutswversion]
                tags['DUT-Serial-Num'] = line[dutserialnum]
                tags['testbed'] = line[testbed_index]
                tags['Units'] = line[units_index]
                for item in self.influx_tag:  # Every item in the influx_tag command needs to be added to the tags variable
                    tags[item[0]] = item[1]
                self.influxdb.post_to_influx(short_description, numeric_score, tags, date)
                line = fp.readline()
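The Date column carries epoch time in milliseconds; a quick worked example of the conversion used above (the sample value reproduces the timestamp shown in the comment below):

    import datetime

    # 1618427044902 is epoch time in milliseconds
    datetime.datetime.utcfromtimestamp(1618427044902 / 1000).isoformat()
    # -> '2021-04-14T19:04:04.902000'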
        # influx wants to get data in the following format:
        #   variable name, value, tags, date
        #   total-download-mbps-speed-for-the-duration-of-this-iteration 171.085494 {'script': 'WiFi Capacity'} 2021-04-14T19:04:04.902000
        df = self.read_csv(self.target_csv)
        length = list(range(0, len(df[0])))
        columns = dict(zip(df[0], length))
        influx_variables = ['script', 'short-description', 'test_details', 'Graph-Group',
                            'DUT-HW-version', 'DUT-SW-version', 'DUT-Serial-Num', 'testbed', 'Units']
        csv_variables = ['test-id', 'short-description', 'test details', 'Graph-Group',
                         'dut-hw-version', 'dut-sw-version', 'dut-serial-num', 'test-rig', 'Units']
        csv_vs_influx = dict(zip(csv_variables, influx_variables))
        for row in df[1:]:
            tags = dict()
            short_description = row[columns['short-description']]
            numeric_score = float(row[columns['numeric-score']])
            date = row[columns['Date']]
            date = datetime.datetime.utcfromtimestamp(int(date) / 1000).isoformat()  # convert to datetime so influx can read it, this is required
            for variable in csv_variables:
                index = columns[variable]
                influx_variable = csv_vs_influx[variable]
                tags[influx_variable] = row[index]
            self.influxdb.post_to_influx(short_description, numeric_score, tags, date)
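To make the header-driven mapping above concrete, here is a small self-contained sketch; the two name lists are copied from the diff, while the row values are invented for illustration:

    header = ['test-id', 'short-description', 'numeric-score', 'Date', 'test details',
              'Graph-Group', 'dut-hw-version', 'dut-sw-version', 'dut-serial-num',
              'test-rig', 'Units']
    row = ['WiFi Capacity', 'total-download-mbps', '171.085494', '1618427044902',
           'details', 'Total Mbps', 'hw-1', 'sw-1', 'sn-1', 'bench-1', 'Mbps']

    columns = dict(zip(header, range(len(header))))   # column name -> index
    csv_vs_influx = dict(zip(
        ['test-id', 'short-description', 'test details', 'Graph-Group', 'dut-hw-version',
         'dut-sw-version', 'dut-serial-num', 'test-rig', 'Units'],
        ['script', 'short-description', 'test_details', 'Graph-Group', 'DUT-HW-version',
         'DUT-SW-version', 'DUT-Serial-Num', 'testbed', 'Units']))

    tags = dict()
    for name in csv_vs_influx:                        # translate kpi column names to influx tag names
        tags[csv_vs_influx[name]] = row[columns[name]]
    # tags['script'] -> 'WiFi Capacity', tags['testbed'] -> 'bench-1', ...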

    def script_name(self):
        with open(self.target_csv) as fp:
@@ -97,7 +97,8 @@ class UseGrafana(LFCliBase):
                         graph_groups=None,
                         graph_groups_file=None,
                         testbed=None,
                         datasource='InfluxDB'):
                         datasource='InfluxDB',
                         from_date='now-1y'):
        options = string.ascii_lowercase + string.ascii_uppercase + string.digits
        uid = ''.join(random.choice(options) for i in range(9))
        input1 = dict()
@@ -117,7 +118,7 @@ class UseGrafana(LFCliBase):
        templating['list'] = list()

        timedict = dict()
        timedict['from'] = 'now-1y'
        timedict['from'] = from_date
        timedict['to'] = 'now'

        panels = list()
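A minimal sketch of what the new from_date parameter changes, assuming Grafana's usual dashboard time-range object; the value shown is hypothetical:

    from_date = 'now-90d'            # hypothetical caller-supplied value
    timedict = dict()
    timedict['from'] = from_date     # previously hard-coded to 'now-1y'
    timedict['to'] = 'now'
    # ends up in the dashboard JSON as: "time": {"from": "now-90d", "to": "now"}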
@@ -341,6 +342,7 @@ def main():
    optional.add_argument('--testbed', help='Which testbed you want to query', default=None)
    optional.add_argument('--kpi', help='KPI file(s) which you want to graph form', action='append', default=None)
    optional.add_argument('--datasource', help='Name of Influx database if different from InfluxDB', default='InfluxDB')
    optional.add_argument('--from_date', help='Date you want to start your Grafana dashboard from', default='now-1y')
    args = parser.parse_args()

    Grafana = UseGrafana(args.grafana_token,
@@ -369,7 +371,8 @@ def main():
                         graph_groups=args.graph_groups,
                         graph_groups_file=args.graph_groups_file,
                         testbed=args.testbed,
                         datasource=args.datasource)
                         datasource=args.datasource,
                         from_date=args.from_date)


if __name__ == "__main__":
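An illustrative invocation exercising the new flag; the script filename is a placeholder, and --grafana_token is assumed from the args.grafana_token reference above:

    # <grafana_script>.py stands in for this file's actual name
    python3 <grafana_script>.py --grafana_token <token> --testbed bench-1 --from_date now-30d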