in src/PerfHistoryDisplay.js [42:97]
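// Fetch the TorchBench v0-nightly result history from S3, compute each
// benchmark's previous-run mean, and store everything in component state.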
async update() {
  const url_prefix =
    "https://s3.amazonaws.com/ossci-metrics/torchbench_v0_nightly";
  // branch: v0-nightly
  const indexes = await axios.get(`${url_prefix}/index.json`);
  console.log(indexes.data);
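  // Inferred shape of the S3 data (from the accesses below; no schema is given):
  //   index.json   -> [{ result: { relpath: "<run>.json" }, ... }, ...]
  //   <run>.json   -> { benchmarks: [{ name, stats: { mean, ... } }, ...], ... }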
  const requests = indexes.data.map(async (run) => {
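    // Fetch each run's result file and attach it as a string-keyed Map;
    // a failed fetch keeps the run but gives it an empty map.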
    try {
      const r = await axios.get(`${url_prefix}/${run.result.relpath}`);
      run.sb_map = objToStrMap(r.data);
    } catch (e) {
      run.sb_map = new Map();
    }
    return run;
  });
  const builds = await axios.all(requests);
  const known_jobs_set = new Set();
  // Use the oldest benchmark run as the standard
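  // (Assumes the oldest run's fetch succeeded; with the empty-Map fallback,
  // get("benchmarks") would be undefined and the forEach below would throw.)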
  const standard_benchmark = builds[0].sb_map.get("benchmarks");
  const benchmark_index = new Map();
  standard_benchmark.forEach((benchmark, index) => {
    known_jobs_set.add(benchmark["name"]);
    benchmark_index.set(benchmark["name"], index);
  });
  console.log(known_jobs_set);
  // Figure out if we think there is a performance regression or not.
  // 1. If the test mean is >10% smaller than the previous mean, it is an optimization
  // 2. If the test mean is >10% larger than the previous mean, it is a regression
  // 3. Otherwise, it is a stable result
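  // The loop below only records each benchmark's prev_mean; the >10% comparison
  // itself is not part of this excerpt. A consumer of prev_mean might classify
  // roughly like this (hypothetical sketch, not code from this file):
  //   const ratio = stats.mean / stats.prev_mean;
  //   const status = ratio < 0.9 ? "optimization" : ratio > 1.1 ? "regression" : "stable";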
  for (let i = 0; i < builds.length; i++) {
    const sb_map = builds[i].sb_map;
    // For each benchmark in this build, record the previous build's mean
    sb_map.get("benchmarks").forEach((benchmark) => {
      const build_benchmark_mean = benchmark["stats"]["mean"];
      const build_benchmark_index = benchmark_index.get(benchmark["name"]);
      if (i === 0) {
        // The oldest build has no predecessor; use its own mean.
        benchmark["stats"]["prev_mean"] = build_benchmark_mean;
      } else {
        const prev_mean =
          builds[i - 1].sb_map.get("benchmarks")[build_benchmark_index][
            "stats"
          ]["mean"];
        benchmark["stats"]["prev_mean"] = prev_mean;
      }
    });
  }
  builds.reverse();
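  // builds was accumulated oldest-to-newest above, so this puts the newest run first.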
  const data = {};
  data.known_jobs = [...known_jobs_set.values()].sort();
  data.benchmark_index = benchmark_index;
  data.builds = builds;
  this.setState(data);
}