def _run_benchmark()

in benchmark.py


def _run_benchmark(bazel_bin_path,
                   project_path,
                   runs,
                   command,
                   options,
                   targets,
                   startup_options,
                   prefetch_ext_deps,
                   bazel_bench_uid,
                   unit_num,
                   data_directory=None,
                   collect_json_profile=False,
                   bazel_identifier=None,
                   project_commit=None):
  """Runs the benchmarking for a combination of (bazel version, project version).

  Args:
    bazel_bin_path: the path to the bazel binary to be run.
    project_path: the path to the project clone to be built.
    runs: the number of benchmark runs.
    command: the bazel command to benchmark (e.g. 'build').
    options: the list of options to pass to the bazel command.
    targets: the list of targets to build.
    startup_options: the list of startup options to pass to the bazel binary.
    prefetch_ext_deps: whether to do a first non-benchmarked run to fetch the
      external dependencies.
    bazel_bench_uid: a unique string identifier of this entire bazel-bench run.
    unit_num: the numerical order of the current unit being benchmarked.
    data_directory: the path to the directory that stores run data. Required if
      collect_json_profile is set.
    collect_json_profile: whether to collect a JSON profile for each run.
    bazel_identifier: an identifier (e.g. commit hash) of the bazel version
      being benchmarked. Required if collect_json_profile is set.
    project_commit: the commit hash of the project version being built.
      Required if collect_json_profile is set.

  Returns:
    A tuple of (the list of results collected from each _single_run,
    (command, targets, options)).
  """
  collected = []
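  # All bazel invocations below run from inside the project clone.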
  os.chdir(project_path)

  logger.log('=== BENCHMARKING BAZEL [Unit #%d]: %s, PROJECT: %s ===' %
             (unit_num, bazel_identifier, project_commit))
  # Runs the command once to make sure external dependencies are fetched.
  if prefetch_ext_deps:
    logger.log('Pre-fetching external dependencies...')
    _single_run(bazel_bin_path, command, options, targets, startup_options)

  if collect_json_profile:
    if not os.path.exists(data_directory):
      os.makedirs(data_directory)

  for i in range(1, runs + 1):
    logger.log('Starting benchmark run %s/%s:' % (i, runs))

    # Copy the base options so profile flags added for this run do not leak
    # into subsequent runs.
    maybe_include_json_profile_flags = options[:]
    if collect_json_profile:
      assert bazel_identifier, ('bazel_identifier is required when '
                                'collect_json_profile is set.')
      assert project_commit, ('project_commit is required when '
                              'collect_json_profile is set.')
      # Append the flags that make Bazel write a JSON profile for this run to
      # the file named by json_profile_filename.
      maybe_include_json_profile_flags += _construct_json_profile_flags(
          json_profile_filename(data_directory, bazel_bench_uid,
                                bazel_identifier.replace('/', '_'), unit_num,
                                project_commit, i, runs))
    collected.append(
        _single_run(bazel_bin_path, command, maybe_include_json_profile_flags,
                    targets, startup_options))

  return collected, (command, targets, options)
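
A minimal sketch of how this function might be called, for context. Every argument value below is a placeholder rather than a value from the real benchmark.py, and the helpers the function relies on (logger, _single_run, _construct_json_profile_flags, json_profile_filename) are assumed to come from the surrounding module:

# Hypothetical invocation; every literal value below is a placeholder.
results, args_used = _run_benchmark(
    bazel_bin_path='/tmp/.bazel-bench/bazel-bin/abc123/bazel',
    project_path='/tmp/.bazel-bench/project-clones/my-project',
    runs=5,
    command='build',
    options=['-c', 'opt'],
    targets=['//:all'],
    startup_options=[],
    # One extra, unmeasured run to fetch external dependencies first.
    prefetch_ext_deps=True,
    bazel_bench_uid='20240101-000000',
    unit_num=0,
    # collect_json_profile requires data_directory, bazel_identifier and
    # project_commit to be provided.
    data_directory='/tmp/.bazel-bench/out',
    collect_json_profile=True,
    bazel_identifier='abc123',
    project_commit='def456')
# results is the list of per-run results from _single_run; args_used echoes
# back the (command, targets, options) tuple.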