in lnt/server/db/fieldchange.py [0:0]
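# This excerpt assumes the module-level names used below -- `sqlalchemy`,
# `ObjectDeletedError` (from sqlalchemy.orm.exc), `lnt.server.reporting.analysis`,
# the `rules` manager, the `note`/`warning` loggers, the FIELD_CHANGE_LOOKBACK
# constant, and the helpers `delete_fieldchange` and `identify_related_changes`
# -- are imported or defined elsewhere in fieldchange.py.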
def regenerate_fieldchanges_for_run(ts, run_id):
"""Regenerate the set of FieldChange objects for the given run.
"""
    # There may be several runs (runs, previous_runs, next_runs) sharing the
    # same order_id; aggregate them together to form our comparison result.
run = ts.getRun(run_id)
    runs = ts.query(ts.Run) \
        .filter(ts.Run.order_id == run.order_id) \
        .filter(ts.Run.machine_id == run.machine_id) \
        .all()
previous_runs = ts.get_previous_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
next_runs = ts.get_next_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
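    # FIELD_CHANGE_LOOKBACK bounds how many neighbouring runs on this
    # machine are considered on either side of the comparison window.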
# Find our start/end order.
    if previous_runs:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    if next_runs:
        end_order = next_runs[-1].order
    else:
        end_order = run.order
# Load our run data for the creation of the new fieldchanges.
runs_to_load = [r.id for r in (runs + previous_runs)]
    # When the same rev is submitted many times the database accesses here
    # can be huge, and it is almost always an error for the same rev to
    # appear in so many runs.
run_size = len(runs_to_load)
if run_size > 50:
warning("Generating field changes for {} runs."
"That will be very slow.".format(run_size))
runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
    # Only store FieldChanges for "metric" samples such as execution time,
    # not for fields carrying other data, e.g. the hash of a binary.
    for field in ts.Sample.get_metric_fields():
for test_id in runinfo.test_ids:
result = runinfo.get_comparison_result(
runs, previous_runs, test_id, field,
ts.Sample.get_hash_of_binary_field())
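            # Passing the binary-hash field along presumably lets the
            # comparison distinguish genuine performance changes from runs
            # of a different binary.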
            # Try to find a matching FieldChange to update, else create one.
try:
f = ts.query(ts.FieldChange) \
.filter(ts.FieldChange.start_order == start_order) \
.filter(ts.FieldChange.end_order == end_order) \
.filter(ts.FieldChange.test_id == test_id) \
.filter(ts.FieldChange.machine == run.machine) \
.filter(ts.FieldChange.field == field) \
.one()
except sqlalchemy.orm.exc.NoResultFound:
f = None
if not result.is_result_performance_change() and f:
                # With more data, it's not a regression. Kill it!
                note("Removing field change: {}".format(f.id))
                delete_fieldchange(ts, f)
continue
if result.is_result_performance_change() and not f:
test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
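                # Note the asymmetry: the lookup above matches on end_order,
                # while the new FieldChange ends at this run's order,
                # presumably to anchor the change at the run that introduced
                # it rather than at the end of the search window.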
f = ts.FieldChange(start_order=start_order,
end_order=run.order,
machine=run.machine,
test=test,
field=field)
# Check the rules to see if this change matters.
if rules.is_useful_change(ts, f):
ts.add(f)
ts.commit()
try:
found, new_reg = identify_related_changes(ts, f)
except ObjectDeletedError:
                        # This can happen from time to time, so retry once.
found, new_reg = identify_related_changes(ts, f)
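                    # identify_related_changes tries to fold this change
                    # into an existing nearby Regression, or opens a new one
                    # (hence the found/new_reg pair).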
if found:
note("Found field change: {}".format(run.machine))
# Always update FCs with new values.
if f:
f.old_value = result.previous
f.new_value = result.current
f.run = run
ts.commit()
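    # Finally, hand the accumulated regressions, newest first, to any
    # registered post-submission rules.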
    regressions = ts.query(ts.Regression) \
        .order_by(ts.Regression.id.desc()).all()
rules.post_submission_hooks(ts, regressions)