in benchmark.py [0:0]
def main(argv):
  _flag_checks()

  config, bazel_clone_repo, project_clone_repo = _get_benchmark_config_and_clone_repos(
      argv)

  # A dictionary that maps a (unit_index, bazel_identifier, project_commit)
  # tuple to its benchmarking result.
  data = collections.OrderedDict()
  csv_data = collections.OrderedDict()

  data_directory = FLAGS.data_directory or DEFAULT_OUT_BASE_PATH
  # We use the start time as a unique identifier of this bazel-bench run.
  bazel_bench_uid = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')

  bazel_bin_base_path = FLAGS.bazel_bin_dir or BAZEL_BINARY_BASE_PATH
  bazel_bin_identifiers = []
  # Build the bazel binaries, if necessary.
  for unit in config.get_units():
    if 'bazel_binary' in unit:
      # A pre-built binary was supplied; use it directly.
      unit['bazel_bin_path'] = unit['bazel_binary']
    elif 'bazel_commit' in unit:
      # Build Bazel at the specified commit and record the path to the binary.
      bazel_bin_path = _build_bazel_binary(unit['bazel_commit'],
                                           bazel_clone_repo,
                                           bazel_bin_base_path, FLAGS.platform)
      unit['bazel_bin_path'] = bazel_bin_path
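
  # Benchmark each unit in the config in sequence: check out the project,
  # run the configured command, and collect the per-run metrics.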
  for i, unit in enumerate(config.get_units()):
    bazel_identifier = unit['bazel_commit'] if 'bazel_commit' in unit else unit['bazel_binary']
    project_commit = unit['project_commit']

    project_clone_repo.git.checkout('-f', project_commit)
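    # Optionally run a user-supplied shell command to prepare the project's
    # environment before benchmarking.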
    if FLAGS.env_configure:
      _exec_command(
          FLAGS.env_configure, shell=True, cwd=project_clone_repo.working_dir)
    results, args = _run_benchmark(
        bazel_bin_path=unit['bazel_bin_path'],
        project_path=project_clone_repo.working_dir,
        runs=unit['runs'],
        command=unit['command'],
        options=unit['options'],
        targets=unit['targets'],
        startup_options=unit['startup_options'],
        prefetch_ext_deps=FLAGS.prefetch_ext_deps,
        bazel_bench_uid=bazel_bench_uid,
        unit_num=i,
        collect_json_profile=unit['collect_profile'],
        data_directory=data_directory,
        bazel_identifier=bazel_identifier,
        project_commit=project_commit)
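    # Collect the values of each metric across all runs of this unit.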
    collected = {}
    for benchmarking_result in results:
      for metric, value in benchmarking_result.items():
        if metric not in collected:
          collected[metric] = Values()
        collected[metric].add(value)

    data[(i, bazel_identifier, project_commit)] = collected
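    # Attributes that aren't measurements but should still be attached to the
    # exported CSV rows.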
    non_measurables = {
        'project_source': unit['project_source'],
        'platform': FLAGS.platform,
        'project_label': FLAGS.project_label
    }
    csv_data[(bazel_identifier, project_commit)] = {
        'results': results,
        'args': args,
        'non_measurables': non_measurables
    }
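
  # Print a human-readable summary of all benchmarking units.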
  summary_text = create_summary(data, config.get_project_source())
  print(summary_text)
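
  # Export the raw results as CSV and the summary as text, but only when an
  # output directory was explicitly requested.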
  if FLAGS.data_directory:
    csv_file_name = FLAGS.csv_file_name or '{}.csv'.format(bazel_bench_uid)
    txt_file_name = csv_file_name.replace('.csv', '.txt')
    output_handling.export_csv(data_directory, csv_file_name, csv_data)
    output_handling.export_file(data_directory, txt_file_name, summary_text)
  # Aggregate the collected JSON profiles into a single CSV. This is mostly
  # for the nightly benchmark.
  if FLAGS.aggregate_json_profiles:
    aggr_json_profiles_csv_path = (
        '%s/%s' % (FLAGS.data_directory, DEFAULT_AGGR_JSON_PROFILE_FILENAME))
    handle_json_profiles_aggr(
        config.get_bazel_commits(),
        config.get_project_source(),
        config.get_project_commits(),
        FLAGS.runs,
        output_prefix=bazel_bench_uid,
        output_path=aggr_json_profiles_csv_path,
        data_directory=FLAGS.data_directory)

  logger.log('Done.')