in testsuite/driver/perf_notes.py [0:0]
def check_stats_change(actual, baseline, tolerance_dev, allowed_perf_changes = None, force_print = False):
    """Compare an actual performance metric against a baseline value.

    Args:
        actual: a PerfStat-like object with .test, .way, .metric, .value,
            and .test_env attributes — the measured result.
        baseline: an object with .perfStat (the expected stat) and
            .commitDepth (how many commits back the baseline was found).
        tolerance_dev: allowed deviation from the baseline, in percent.
        allowed_perf_changes: mapping of test name -> list of "allow"
            statements (dicts with 'direction', 'metrics', and 'opts'
            keys) describing expected metric changes. Defaults to no
            allowed changes.
        force_print: if True, print the expected/actual summary even when
            the stat is within tolerance.

    Returns:
        (change, result) where change is a MetricChange direction and
        result is a pass/fail object from passed()/failBecause().
    """
    # NOTE: default must not be a mutable {} literal — default argument
    # values are evaluated once at def-time and would be shared between
    # calls. Use the None sentinel instead.
    if allowed_perf_changes is None:
        allowed_perf_changes = {}

    expected_val = baseline.perfStat.value
    full_name = actual.test + ' (' + actual.way + ')'

    # Tolerance window around the baseline. The lower bound truncates
    # downward and the upper bound rounds up, so the window is inclusive
    # of the borderline integer values.
    lowerBound = trunc(           int(expected_val) * ((100 - float(tolerance_dev))/100))
    upperBound = trunc(0.5 + ceil(int(expected_val) * ((100 + float(tolerance_dev))/100)))

    # Percent deviation of the actual value from the baseline, to 1 dp.
    actual_dev = round(((float(actual.value) * 100)/ int(expected_val)) - 100, 1)

    # Find the direction of change.
    change = MetricChange.NoChange
    if actual.value < lowerBound:
        change = MetricChange.Decrease
    elif actual.value > upperBound:
        change = MetricChange.Increase

    # Is the change allowed? NoChange is always allowed; otherwise the
    # direction must be declared by an allow statement matching this
    # test's metric, way, and test environment (unspecified fields in the
    # allow statement match anything).
    allowed_change_directions =  [MetricChange.NoChange] + [ allow_stmt['direction']
            for allow_stmt in allowed_perf_changes.get(actual.test, [])

            # List of metrics are not specified or the metric is in the list of metrics.
            if not allow_stmt['metrics'] or actual.metric in allow_stmt['metrics']

            # way/test are not specified, or match the actual way/test.
            if ((not 'way'      in allow_stmt['opts'].keys()) or actual.way      == allow_stmt['opts']['way'])
            if ((not 'test_env' in allow_stmt['opts'].keys()) or actual.test_env == allow_stmt['opts']['test_env'])
        ]
    change_allowed = change in allowed_change_directions

    # Print errors and create pass/fail object.
    result = passed()
    if not change_allowed:
        error = change + ' from ' + baseline.perfStat.test_env + \
                ' baseline @ HEAD~' + str(baseline.commitDepth)
        print(actual.metric, error + ':')
        result = failBecause('stat ' + error, tag='stat')

    if not change_allowed or force_print:
        # Right-align all printed values to the widest one for a tidy table.
        length = max(len(str(x)) for x in [expected_val, lowerBound, upperBound, actual.value])

        def display(descr, val, extra):
            print(descr, str(val).rjust(length), extra)

        display('    Expected    ' + full_name + ' ' + actual.metric + ':', expected_val, '+/-' + str(tolerance_dev) + '%')
        display('    Lower bound ' + full_name + ' ' + actual.metric + ':', lowerBound, '')
        display('    Upper bound ' + full_name + ' ' + actual.metric + ':', upperBound, '')
        display('    Actual      ' + full_name + ' ' + actual.metric + ':', actual.value, '')
        if actual.value != expected_val:
            display('    Deviation   ' + full_name + ' ' + actual.metric + ':', actual_dev, '%')

    return (change, result)