in benchmark.py [0:0]
def _single_run(bazel_bin_path,
command,
options,
targets,
startup_options):
"""Runs the benchmarking for a combination of (bazel version, project version).
Args:
bazel_bin_path: the path to the bazel binary to be run.
command: the command to be run with Bazel.
options: the list of options.
targets: the list of targets.
startup_options: the list of startup options.
Returns:
A result object:
{
'wall': 1.000,
'cpu': 1.000,
'system': 1.000,
'memory': 1.000,
'exit_status': 0,
'started_at': datetime.datetime(2019, 1, 1, 0, 0, 0, 000000),
}
"""
bazel = Bazel(bazel_bin_path, startup_options)
default_arguments = collections.defaultdict(list)
# Append some default options if the command is 'build'.
# The order in which the options appear matters.
if command == 'build':
options = options + ['--nostamp', '--noshow_progress', '--color=no']
measurements = bazel.command(command, args=options + targets)
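# command() may return None (e.g. if the run could not be measured), so only
# log results when we actually have them.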
if measurements is not None:
logger.log('Results of this run: wall: %.3fs, cpu: %.3fs, '
           'system: %.3fs, memory: %.3fMB, exit_status: %d' % (
               measurements['wall'],
               measurements['cpu'],
               measurements['system'],
               measurements['memory'],
               measurements['exit_status']))
# Get back to a clean state.
bazel.command('clean', ['--color=no'])
bazel.command('shutdown')
return measurements
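# A minimal usage sketch (hypothetical values; the real call sites live
# elsewhere in benchmark.py, and the path/targets below are made up):
#
#   measurements = _single_run(
#       bazel_bin_path='/usr/local/bin/bazel',
#       command='build',
#       options=['--jobs=8'],
#       targets=['//:all'],
#       startup_options=[])
#   if measurements is not None:
#     logger.log('Wall time: %.3fs' % measurements['wall'])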