in lnt/server/ui/views.py [0:0]
def v4_graph():
    """Render the graph page for one or more (machine, test, field) series.

    Everything is driven by the query string:
      * ``plot.<unused>=<machine id>.<test id>.<field index>`` selects a
        series to graph.
      * ``mean=<machine id>.<field index>`` requests a geometric-mean trend.
      * ``baseline.<title>=<run id>`` draws a horizontal baseline.
      * assorted boolean/int display options (``show_stddev``, ``show_mad``,
        ``moving_window_size``, ...).

    Returns the rendered ``v4_graph.html`` template, or a JSON payload with
    the same plot data when ``json`` is passed.  Aborts with 400 on malformed
    ids and 404 on unknown machines/tests/fields.
    """
    from lnt.server.ui import util
    from lnt.testing import PASS
    from lnt.util import stats
    from lnt.external.stats import stats as ext_stats

    ts = request.get_testsuite()

    # The min()/mean() aggregation toggle is remembered in the session so it
    # survives navigation through plain links (which carry no form args).
    switch_min_mean_local = False
    if 'switch_min_mean_session' not in session:
        session['switch_min_mean_session'] = False

    # Parse the view options.
    options = {'min_mean_checkbox': 'min()'}
    if 'submit' in request.args:  # user pressed a button
        # Record the current state of the mean() checkbox.  A missing or
        # empty 'switch_min_mean' argument maps to False.
        session['switch_min_mean_session'] = options['switch_min_mean'] = \
            bool(request.args.get('switch_min_mean'))
        switch_min_mean_local = session['switch_min_mean_session']
    else:  # new page was loaded by clicking link, not submit button
        options['switch_min_mean'] = switch_min_mean_local = \
            session['switch_min_mean_session']

    options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
    show_lineplot = not options['hide_lineplot']
    options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
    options['show_stddev'] = show_stddev = \
        bool(request.args.get('show_stddev'))
    options['hide_all_points'] = hide_all_points = bool(
        request.args.get('hide_all_points'))
    options['show_linear_regression'] = show_linear_regression = bool(
        request.args.get('show_linear_regression'))
    options['show_failures'] = show_failures = bool(
        request.args.get('show_failures'))
    options['normalize_by_median'] = normalize_by_median = bool(
        request.args.get('normalize_by_median'))
    options['show_moving_average'] = moving_average = bool(
        request.args.get('show_moving_average'))
    options['show_moving_median'] = moving_median = bool(
        request.args.get('show_moving_median'))
    options['moving_window_size'] = moving_window_size = int(
        request.args.get('moving_window_size', 10))
    options['hide_highlight'] = bool(
        request.args.get('hide_highlight'))
    show_highlight = not options['hide_highlight']

    def convert_revision(dotted):
        """Turn a version number like 489.2.10 into something
        that is ordered and sortable.
        For now 489.2.10 will be returned as a tuple of ints.
        """
        dotted = integral_rex.findall(dotted)
        return tuple([int(d) for d in dotted])

    # Load the graph parameters.
    graph_parameters = []
    for name, value in request.args.items():
        # Plots to graph are passed as::
        #
        #  plot.<unused>=<machine id>.<test id>.<field index>
        if not name.startswith(str('plot.')):
            continue

        # Ignore the extra part of the key, it is unused.
        machine_id_str, test_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            test_id = int(test_id_str)
            field_index = int(field_index_str)
        except ValueError:
            # Non-integer ids in the plot specification.
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
            field = ts.sample_fields[field_index]
        except NoResultFound:
            return abort(404)
        graph_parameters.append((machine, test, field, field_index))

    # Order the plots by machine name, test name and then field.
    graph_parameters.sort(
        key=lambda p: (p[0].name, p[1].name, p[2].name, p[3]))

    # Extract requested mean trend.
    mean_parameter = None
    for name, value in request.args.items():
        # Mean to graph is passed as:
        #
        #  mean=<machine id>.<field index>
        if name != 'mean':
            continue

        machine_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
        except NoResultFound:
            return abort(404)
        field = ts.sample_fields[field_index]

        mean_parameter = (machine, field)

    # Sanity check the arguments.
    if not graph_parameters and not mean_parameter:
        return render_template("error.html", message="Nothing to graph.")

    # Extract requested baselines, and their titles.
    baseline_parameters = []
    for name, value in request.args.items():
        # Baselines to graph are passed as:
        #
        #  baseline.title=<run id>
        if not name.startswith(str('baseline.')):
            continue
        baseline_title = name[len('baseline.'):]

        run_id_str = value
        try:
            run_id = int(run_id_str)
        except ValueError:
            return abort(400)

        try:
            run = ts.query(ts.Run).join(ts.Machine) \
                .filter(ts.Run.id == run_id).one()
        except NoResultFound:
            err_msg = "The run {} was not found in the database.".format(
                run_id)
            return render_template("error.html",
                                   message=err_msg)

        baseline_parameters.append((run, baseline_title))

    # Create region of interest for run data region if we are performing a
    # comparison.
    revision_range = None
    highlight_run_id = request.args.get('highlight_run')
    if show_highlight and highlight_run_id and highlight_run_id.isdigit():
        highlight_run = ts.query(ts.Run).filter_by(
            id=int(highlight_run_id)).first()
        if highlight_run is None:
            abort(404)

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N=1))
        if prev_runs:
            start_rev = prev_runs[0].order.llvm_project_revision
            end_rev = highlight_run.order.llvm_project_revision
            revision_range = {
                "start": convert_revision(start_rev),
                "end": convert_revision(end_rev)}

    # Build the graph data.
    legend = []
    graph_plots = []
    graph_datum = []
    overview_plots = []
    baseline_plots = []
    num_plots = len(graph_parameters)
    for i, (machine, test, field, field_index) in enumerate(graph_parameters):
        # Determine the base plot color.
        col = list(util.makeDarkColor(float(i) / num_plots))
        url = "/".join([str(machine.id), str(test.id), str(field_index)])
        legend.append(LegendItem(machine, test.name, field.name, tuple(col),
                                 url))

        # Load all the field values for this test on the same machine.
        #
        # FIXME: Don't join to Order here, aggregate this across all the tests
        # we want to load. Actually, we should just make this a single query.
        #
        # FIXME: Don't hard code field name.
        q = ts.query(field.column, ts.Order.llvm_project_revision,
                     ts.Run.start_time, ts.Run.id).\
            join(ts.Run).join(ts.Order).\
            filter(ts.Run.machine_id == machine.id).\
            filter(ts.Sample.test == test).\
            filter(field.column != None)

        # Unless all samples requested, filter out failing tests.
        if not show_failures:
            if field.status_field:
                q = q.filter((field.status_field.column == PASS) |
                             (field.status_field.column == None))

        # Aggregate by revision, then sort by (tupleized) revision number.
        data = sorted(
            util.multidict((rev, (val, date, run_id))
                           for val, rev, date, run_id in q).items(),
            key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test.name, data, col, field, url))

        # Get baselines for this line.
        num_baselines = len(baseline_parameters)
        for baseline_id, (baseline, baseline_title) in \
                enumerate(baseline_parameters):
            q_baseline = ts.query(field.column,
                                  ts.Order.llvm_project_revision,
                                  ts.Run.start_time, ts.Machine.name).\
                join(ts.Run).join(ts.Order).join(ts.Machine).\
                filter(ts.Run.id == baseline.id).\
                filter(ts.Sample.test == test).\
                filter(field.column != None)
            # In the event of many samples, use the mean of the samples as
            # the baseline.
            samples = []
            for sample in q_baseline:
                samples.append(sample[0])
            # Skip this baseline if there is no data.
            if not samples:
                continue
            mean = sum(samples) / len(samples)
            # Darken the baseline color distinguish from non-baselines.
            # Make a color closer to the sample than its neighbour.
            color_offset = float(baseline_id) / num_baselines / 2
            my_color = (i + color_offset) / num_plots
            dark_col = list(util.makeDarkerColor(my_color))
            str_dark_col = util.toColorString(dark_col)
            baseline_plots.append(
                {'color': str_dark_col,
                 'lineWidth': 2,
                 'yaxis': {'from': mean, 'to': mean},
                 'name': q_baseline[0].llvm_project_revision})
            baseline_name = "Baseline {} on {}".format(baseline_title,
                                                       q_baseline[0].name)
            legend.append(
                LegendItem(BaselineLegendItem(baseline_name, baseline.id),
                           test.name, field.name, dark_col, None))

    # Draw mean trend if requested.
    if mean_parameter:
        machine, field = mean_parameter
        test_name = 'Geometric Mean'

        col = (0, 0, 0)
        legend.append(LegendItem(machine, test_name, field.name, col, None))

        q = ts.query(sqlalchemy.sql.func.min(field.column),
                     ts.Order.llvm_project_revision,
                     sqlalchemy.sql.func.min(ts.Run.start_time)).\
            join(ts.Run).join(ts.Order).join(ts.Test).\
            filter(ts.Run.machine_id == machine.id).\
            filter(field.column != None).\
            group_by(ts.Order.llvm_project_revision, ts.Test)

        # Calculate geomean of each revision.
        data = util.multidict(((rev, date), val)
                              for val, rev, date in q).items()
        data = [(rev,
                 [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
                for ((rev, date), vals) in data]

        # Sort data points according to revision number.
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test_name, data, col, field, None))

    for name, data, col, field, url in graph_datum:
        # Compute the graph points.
        errorbar_data = []
        points_data = []
        pts = []
        moving_median_data = []
        moving_average_data = []

        if normalize_by_median:
            normalize_by = 1.0 / stats.median(
                [min([d[0] for d in values]) for _, values in data])
        else:
            normalize_by = 1.0

        for pos, (point_label, datapoints) in enumerate(data):
            # Get the samples.
            samples = [data_date[0] for data_date in datapoints]
            # And the date on which they were taken.
            dates = [data_date[1] for data_date in datapoints]
            # Run where this point was collected.  Geomean datapoints carry
            # only (value, date) pairs, hence the length check.
            runs = [data_pts[2]
                    for data_pts in datapoints if len(data_pts) == 3]

            # When we can, map x-axis to revisions, but when that is too hard
            # use the position of the sample instead.
            rev_x = convert_revision(point_label)
            x = rev_x[0] if len(rev_x) == 1 else pos

            values = [v * normalize_by for v in samples]
            # Aggregate multiple samples per revision: min() by default,
            # mean() when the session toggle is set, and max() for fields
            # where bigger is better.
            aggregation_fn = min
            if switch_min_mean_local:
                aggregation_fn = lnt.util.stats.agg_mean
            if field.bigger_is_better:
                aggregation_fn = max
            agg_value, agg_index = \
                aggregation_fn((value, index)
                               for (index, value) in enumerate(values))

            # Generate metadata (mouse-over text for the aggregated point).
            metadata = {"label": point_label}
            metadata["date"] = str(dates[agg_index])
            if runs:
                metadata["runID"] = str(runs[agg_index])

            if len(graph_datum) > 1:
                # If there are more than one plot in the graph, also label the
                # test name.
                metadata["test_name"] = name

            pts.append((x, agg_value, metadata))

            # Add the individual points, if requested.
            # For each point add a text label for the mouse over.
            if not hide_all_points:
                for point_idx, v in enumerate(values):
                    point_metadata = dict(metadata)
                    point_metadata["date"] = str(dates[point_idx])
                    points_data.append((x, v, point_metadata))

            # Add the standard deviation error bar, if requested.
            if show_stddev:
                mean = stats.mean(values)
                sigma = stats.standard_deviation(values)
                errorbar_data.append((x, mean, sigma))

            # Add the MAD error bar, if requested.
            if show_mad:
                med = stats.median(values)
                mad = stats.median_absolute_deviation(values, med)
                errorbar_data.append((x, med, mad))

        # Compute the moving average and or moving median of our data if
        # requested.
        if moving_average or moving_median:
            fun = None

            def compute_moving_average(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))

            def compute_moving_median(x, window, average_list, median_list):
                median_list.append((x, lnt.util.stats.median(window)))

            def compute_moving_average_and_median(x, window, average_list,
                                                  median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
                median_list.append((x, lnt.util.stats.median(window)))

            if moving_average and moving_median:
                fun = compute_moving_average_and_median
            elif moving_average:
                fun = compute_moving_average
            else:
                fun = compute_moving_median

            len_pts = len(pts)
            for i in range(len_pts):
                # Window around point i, clamped at both ends of the series.
                start_index = max(0, i - moving_window_size)
                end_index = min(len_pts, i + moving_window_size)

                window_pts = [x[1] for x in pts[start_index:end_index]]
                fun(pts[i][0], window_pts, moving_average_data,
                    moving_median_data)

        # On the overview, we always show the line plot.
        overview_plots.append({
            "data": pts,
            "color": util.toColorString(col)})

        # Add the minimum line plot, if requested.
        if show_lineplot:
            plot = {
                "data": pts,
                "color": util.toColorString(col),
            }
            if url:
                plot["url"] = url
            graph_plots.append(plot)

        # Add regression line, if requested.
        if show_linear_regression:
            xs = [t for t, v, _ in pts]
            ys = [v for t, v, _ in pts]

            # We compute the regression line in terms of a normalized X scale.
            x_min, x_max = min(xs), max(xs)
            try:
                norm_xs = [(x - x_min) / (x_max - x_min)
                           for x in xs]
            except ZeroDivisionError:
                norm_xs = xs

            try:
                info = ext_stats.linregress(norm_xs, ys)
            except ZeroDivisionError:
                info = None
            except ValueError:
                info = None

            if info is not None:
                slope, intercept, _, _, _ = info

                reglin_col = [c * .7 for c in col]
                reglin_pts = [(x_min, 0.0 * slope + intercept),
                              (x_max, 1.0 * slope + intercept)]
                graph_plots.insert(0, {
                    "data": reglin_pts,
                    "color": util.toColorString(reglin_col),
                    "lines": {
                        "lineWidth": 2},
                    "shadowSize": 4})

        # Add the points plot, if used.
        if points_data:
            pts_col = (0, 0, 0)
            plot = {
                "data": points_data,
                "color": util.toColorString(pts_col),
                "lines": {"show": False},
                "points": {
                    "show": True,
                    "radius": .25,
                    "fill": True,
                },
            }
            if url:
                plot['url'] = url
            graph_plots.append(plot)

        # Add the error bar plot, if used.
        if errorbar_data:
            bar_col = [c * .7 for c in col]
            graph_plots.append({
                "data": errorbar_data,
                "lines": {"show": False},
                "color": util.toColorString(bar_col),
                "points": {
                    "errorbars": "y",
                    "yerr": {"show": True,
                             "lowerCap": "-",
                             "upperCap": "-",
                             "lineWidth": 1}}})

        # Add the moving average plot, if used.
        if moving_average_data:
            col = [0.32, 0.6, 0.0]
            graph_plots.append({
                "data": moving_average_data,
                "color": util.toColorString(col)})

        # Add the moving median plot, if used.
        if moving_median_data:
            col = [0.75, 0.0, 1.0]
            graph_plots.append({
                "data": moving_median_data,
                "color": util.toColorString(col)})

    if bool(request.args.get('json')):
        json_obj = dict()
        json_obj['data'] = graph_plots
        # Flatten ORM machine objects to their string names.
        simple_type_legend = []
        for li in legend:
            # Flatten name, make color a dict.
            new_entry = {'name': li.machine.name,
                         'test': li.test_name,
                         'unit': li.field_name,
                         'color': util.toColorString(li.color),
                         'url': li.url}
            simple_type_legend.append(new_entry)
        json_obj['legend'] = simple_type_legend
        json_obj['revision_range'] = revision_range
        json_obj['current_options'] = options
        json_obj['test_suite_name'] = ts.name
        json_obj['baselines'] = baseline_plots
        return flask.jsonify(**json_obj)

    return render_template("v4_graph.html", ts=ts, options=options,
                           revision_range=revision_range,
                           graph_plots=graph_plots,
                           overview_plots=overview_plots, legend=legend,
                           baseline_plots=baseline_plots)