def from_flags()

in utils/benchmark_config.py


  @classmethod
  def from_flags(cls, bazel_commits, bazel_binaries, project_commits,
                 bazel_source, project_source, runs, collect_profile, command):
    """Creates the BenchmarkConfig based on specified flags.

    Args:
      bazel_commits: the Bazel commits to benchmark.
      bazel_binaries: paths to pre-built Bazel binaries.
      project_commits: the project commits to benchmark.
      bazel_source: either a path to the local Bazel repo or an HTTPS URL to a
        GitHub repository.
      project_source: either a path to the local git project to be built or an
        HTTPS URL to a GitHub repository.
      runs: The number of benchmark runs to perform for each combination.
      collect_profile: Whether to collect a JSON profile.
      command: the full command to benchmark, optionally with startup options
        prepended, e.g. "--noexobazel build --nobuild ...".

    Returns:
      The created config object.
    """
    units = []
    # One benchmark unit per (Bazel commit, project commit) pair.
    for bazel_commit in bazel_commits:
      for project_commit in project_commits:
        units.append(
            cls._parse_unit({
                'bazel_commit': bazel_commit,
                'project_commit': project_commit,
                'bazel_source': bazel_source,
                'project_source': project_source,
                'runs': runs,
                'collect_profile': collect_profile,
                'command': command,
            }))
    # One benchmark unit per (pre-built Bazel binary, project commit) pair.
    for bazel_binary in bazel_binaries:
      for project_commit in project_commits:
        units.append(
            cls._parse_unit({
                'bazel_binary': bazel_binary,
                'project_commit': project_commit,
                'bazel_source': bazel_source,
                'project_source': project_source,
                'runs': runs,
                'collect_profile': collect_profile,
                'command': command,
            }))
    # Flag the config as benchmarking across project commits only when more
    # than one project commit was supplied.
    return cls(units, benchmark_project_commits=(len(project_commits) > 1))
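
A minimal usage sketch, for illustration only (the commit hashes, paths, and
URLs below are hypothetical placeholders; it assumes BenchmarkConfig is the
enclosing class in utils/benchmark_config.py, as the cls(...) calls above
imply):

  from utils.benchmark_config import BenchmarkConfig

  config = BenchmarkConfig.from_flags(
      bazel_commits=['0abc123'],               # hypothetical Bazel commit
      bazel_binaries=['/tmp/bazel-prebuilt'],  # hypothetical pre-built binary
      project_commits=['d4e5f6a'],             # hypothetical project commit
      bazel_source='https://github.com/bazelbuild/bazel.git',
      project_source='https://github.com/bazelbuild/rules_cc.git',
      runs=5,
      collect_profile=False,
      command='build //:all',
  )

With one Bazel commit, one pre-built binary, and one project commit, the two
cross-product loops above produce two benchmark units, and
benchmark_project_commits is False because len(project_commits) == 1.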