in save_jmh_result.py [0:0]
import csv
import datetime
import os


def readData(args):
    """Parse a JMH CSV result file into a list of Codespeed result dictionaries."""
    results = []
    if args.input:
        path = args.input
    else:
        path = "jmh-result.csv"
    # The file's modification time serves as both the revision and the result date.
    modificationDate = datetime.datetime.fromtimestamp(os.path.getmtime(path))
    # modificationDate = datetime.date(2016, 8, int(args.commit))
    with open(path) as csvFile:
        reader = csv.reader(csvFile, delimiter=",")
        lines = [line for line in reader]
        header = lines[0]
        # Columns whose header starts with "Param" hold the benchmark parameter values.
        params = sorted(filter(lambda s: s.startswith("Param"), header))
        # Materialize the indexes as a list so they can be counted and reused per row (Python 3 safe).
        paramIndexes = [header.index(param) for param in params]
        benchmarkIndex = header.index("Benchmark")
        scoreIndex = header.index("Score")
        modeIndex = header.index("Mode")
        unitIndex = header.index("Unit")
        errorIndex = scoreIndex + 1
        for line in lines[1:]:
            # Short benchmark name, with any non-empty parameter values appended.
            name = line[benchmarkIndex].split(".")[-1]
            if len(paramIndexes) > 0:
                for paramIndex in paramIndexes:
                    if len(line[paramIndex]) > 0:
                        name += "." + line[paramIndex]
            # For average-time ("avgt") and single-shot ("ss") modes a lower score is better.
            lessIsBetter = line[modeIndex] == "avgt" or line[modeIndex] == "ss"
            # unitsTitle is used to distinguish different groups of benchmarks when getting changes,
            # see https://github.com/tobami/codespeed/blob/263860bc298fd970c8466b3161de386582e4f801/codespeed/models.py#L444
            unitsTitle = "Times" if lessIsBetter else "Time"
            results.append({
                'commitid': args.commit,
                'branch': args.branch,
                'project': args.project,
                'executable': args.executable,
                'benchmark': name,
                'environment': args.environment,
                'lessisbetter': lessIsBetter,
                'units': line[unitIndex],
                'units_title': unitsTitle,
                'result_value': float(line[scoreIndex]),
                'revision_date': str(modificationDate),
                'result_date': str(modificationDate),
                'std_dev': line[errorIndex],  # Optional. Default is blank.
            })
    return results
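
The args object consumed by readData is assumed to come from argparse; the attributes it must expose (input, commit, branch, project, executable, environment) can be read directly from the code above. Below is a minimal sketch of a matching parser and of how the returned dictionaries might be uploaded to a Codespeed instance. The dictionary keys correspond to Codespeed's JSON result-submission fields (see the models.py link in the code), but the --codespeed-url flag and the driver code itself are illustrative assumptions, not taken from save_jmh_result.py.

import argparse
import json
import urllib.parse
import urllib.request

# Hypothetical argument parser; option names mirror the attributes used in readData().
parser = argparse.ArgumentParser(description="Upload JMH CSV results to a Codespeed instance")
parser.add_argument("--input", help="path to the JMH CSV file (defaults to ./jmh-result.csv)")
parser.add_argument("--commit", required=True)
parser.add_argument("--branch", default="master")
parser.add_argument("--project", required=True)
parser.add_argument("--executable", required=True)
parser.add_argument("--environment", required=True)
parser.add_argument("--codespeed-url", default="http://localhost:8000",
                    help="base URL of the Codespeed server (assumed flag)")
args = parser.parse_args()

results = readData(args)

# Codespeed accepts a JSON-encoded list of result dictionaries POSTed to /result/add/json/.
data = urllib.parse.urlencode({"json": json.dumps(results)}).encode("utf-8")
urllib.request.urlopen(args.codespeed_url + "/result/add/json/", data)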