in report/generate_report.py [0:0]
def _prepare_data_for_graph(performance_data, aggr_json_profile):
"""Massage the data to fit a format suitable for graph generation."""
bazel_commit_to_phase_proportion = _get_proportion_breakdown(
aggr_json_profile)
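  # Group wall and memory readings by Bazel commit, preserving the order in
  # which commits first appear in the performance data.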
ordered_commit_to_readings = collections.OrderedDict()
for entry in performance_data:
# Exclude measurements from failed runs in the graphs.
# TODO(leba): Print the summary table, which includes info on which runs
# failed.
if entry["exit_status"] != "0":
continue
bazel_commit = entry["bazel_commit"]
if bazel_commit not in ordered_commit_to_readings:
ordered_commit_to_readings[bazel_commit] = {
"bazel_commit": bazel_commit,
"wall_readings": [],
"memory_readings": [],
}
ordered_commit_to_readings[bazel_commit]["wall_readings"].append(
float(entry["wall"]))
ordered_commit_to_readings[bazel_commit]["memory_readings"].append(
float(entry["memory"]))
  wall_data = [
      ["Bazel Commit"] + EVENTS_ORDER + [
          "Median [Min, Max]",
          {"role": "interval"},
          {"role": "interval"},
      ]
  ]
  memory_data = [[
      "Bazel Commit",
      "Memory (MB)",
      {"role": "interval"},
      {"role": "interval"},
  ]]
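  # Collapse each commit's readings into one chart row: the median, bracketed
  # by min/max interval columns. Wall time is additionally broken down into
  # build phases according to the commit's phase proportions.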
for obj in ordered_commit_to_readings.values():
commit = _short_form(obj["bazel_commit"])
median_wall = statistics.median(obj["wall_readings"])
min_wall = min(obj["wall_readings"])
max_wall = max(obj["wall_readings"])
    phase_proportions = bazel_commit_to_phase_proportion[obj["bazel_commit"]]
    wall_data.append(
        [commit] +
        _fit_data_to_phase_proportion(median_wall, phase_proportions) +
        [median_wall, min_wall, max_wall])
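    # Memory rows need no phase breakdown: just median with min/max intervals.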
median_memory = statistics.median(obj["memory_readings"])
min_memory = min(obj["memory_readings"])
max_memory = max(obj["memory_readings"])
memory_data.append([commit, median_memory, min_memory, max_memory])
return wall_data, memory_data
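

# Illustrative sketch only (hypothetical sample values, not used by the report
# pipeline): with input such as
#
#   sample_performance_data = [
#       {"bazel_commit": "abc1234", "exit_status": "0",
#        "wall": "42.1", "memory": "950.0"},
#       {"bazel_commit": "abc1234", "exit_status": "0",
#        "wall": "40.3", "memory": "975.5"},
#       {"bazel_commit": "abc1234", "exit_status": "1",  # failed run, skipped
#        "wall": "10.0", "memory": "100.0"},
#   ]
#
# the returned memory_data would be
#
#   [["Bazel Commit", "Memory (MB)",
#     {"role": "interval"}, {"role": "interval"}],
#    [_short_form("abc1234"), 962.75, 950.0, 975.5]]
#
# wall_data follows the same median/min/max pattern, except the median wall
# time is additionally split across the build phases in EVENTS_ORDER via
# _fit_data_to_phase_proportion.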