in generate_test_report.py [0:0]
def reporter_parser():
    """Build and return the argument parser for the test-report generator.

    Returns:
        argparse.ArgumentParser: parser with all report options configured.
    """
    # NOTE: the text must be passed as `description=`; ArgumentParser's first
    # positional parameter is `prog` (the program name shown in usage), so
    # passing the blurb positionally would mangle the --help output.
    parser = argparse.ArgumentParser(
        description="This tool can be used to generate a report of where "
        "everything is currently running."
    )
    parser.add_argument(
        "--decision-task-id",
        type=str,
        default="",
        help="The decision task to get the full-task-graph.json file from.",
    )
    parser.add_argument(
        "--full-task-graph-path",
        type=str,
        default="",
        help="A path to a full-task-graph.json artifact to use instead of "
        "obtaining it from a decision task.",
    )
    parser.add_argument(
        "--tests",
        type=str,
        nargs="+",
        default=["raptor", "browsertime"],
        help="The tests to build a report for (pattern matched). "
        + "Defaults to raptor and browsertime.",
    )
    parser.add_argument(
        "--platforms",
        type=str,
        nargs="+",
        default=[],
        help="Platforms to return results for. Defaults to all.",
    )
    # No nargs here: with nargs=1 the parsed value would be a one-element
    # list while the default is a plain string (os.getcwd()), giving callers
    # inconsistent types. A bare str option keeps both paths a string.
    parser.add_argument(
        "--output",
        type=str,
        default=os.getcwd(),
        help="This is where the data will be saved. Defaults to CWD.",
    )
    parser.add_argument(
        "--platform-breakdown",
        action="store_true",
        default=False,
        help="Get a platform breakdown instead of a test breakdown.",
    )
    parser.add_argument(
        "--branch-breakdown",
        action="store_true",
        default=False,
        help="Get a branch breakdown instead of a test breakdown.",
    )
    parser.add_argument(
        "--match-all-tests",
        action="store_true",
        default=False,
        help="Only tests which match all --tests entries will be selected.",
    )
    parser.add_argument(
        "--ignore-no-projects",
        action="store_true",
        default=False,
        help="Prevents displaying tests with no projects.",
    )
    parser.add_argument(
        "--field",
        type=str,
        default="attributes.run_on_projects",
        help="The field to search for (defaults to `attributes.run_on_projects`).",
    )
    parser.add_argument(
        "--show-all-fields",
        action="store_true",
        default=False,
        help="Show all available fields in the given FTG.",
    )
    return parser