in report/firefox_code_coverage/codecoverage.py [0:0]
# Standard-library imports used by main(); in the full codecoverage.py module
# these live at the top of the file alongside the helper functions called below.
import argparse
import json
import os


def main():
    parser = argparse.ArgumentParser()

    # On a Taskcluster loaner the source checkout and push metadata can be
    # derived from the environment, so the positional arguments become optional.
    if is_taskcluster_loaner():
        nargs = "?"
        default_src_dir = "/home/worker/workspace/build/src/"
        default_branch = os.environ["MH_BRANCH"]
        default_commit = os.environ["GECKO_HEAD_REV"]
    else:
        nargs = None
        default_src_dir = None
        default_branch = None
        default_commit = None
    parser.add_argument(
        "src_dir",
        action="store",
        nargs=nargs,
        default=default_src_dir,
        help="Path to the source directory",
    )
    parser.add_argument(
        "branch",
        action="store",
        nargs="?",
        default=default_branch,
        help="Branch on which jobs ran",
    )
    parser.add_argument(
        "commit",
        action="store",
        nargs="?",
        default=default_commit,
        help="Commit hash for push",
    )
    parser.add_argument("--grcov", action="store", nargs="?", help="Path to grcov")
    parser.add_argument(
        "--with-artifacts",
        action="store",
        nargs="?",
        default="ccov-artifacts",
        help="Path to already downloaded coverage files",
    )
    parser.add_argument(
        "--platform",
        action="store",
        nargs="+",
        help='List of platforms to include (by default they are all included). E.g. "linux", "windows", etc.',
    )
    parser.add_argument(
        "--suite",
        action="store",
        nargs="+",
        help='List of test suites to include (by default they are all included). E.g. "mochitest", "mochitest-chrome", "gtest", etc.',
    )
    parser.add_argument(
        "--ignore",
        action="store",
        nargs="+",
        help='List of test suites to ignore (by default "talos" and "awsy"). E.g. "mochitest", "mochitest-chrome", "gtest", etc.',
    )
    parser.add_argument(
        "--stats",
        action="store_true",
        help="Only generate high-level stats, not a full HTML report",
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        help="The output directory for the generated report",
        default=os.path.join(os.getcwd(), "ccov-report"),
    )
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    # Branch and commit must be given together (or both omitted).
    if (args.branch is None) != (args.commit is None):
        parser.print_help()
        return

    if args.branch and args.commit:
        task_id = get_task(args.branch, args.commit)
    else:
        task_id = get_last_task()

    # Only pass the ignore list when the user supplied one, so the helper's
    # default (ignoring "talos" and "awsy") applies otherwise.
    if args.ignore is None:
        artifact_paths = download_coverage_artifacts(
            task_id, args.suite, args.platform, args.with_artifacts
        )
    else:
        artifact_paths = download_coverage_artifacts(
            task_id, args.suite, args.platform, args.with_artifacts, args.ignore
        )

    if args.grcov:
        grcov_path = args.grcov
    else:
        grcov_path = download_grcov()
    if args.stats:
        output = os.path.join(args.output_dir, "output.json")
        generate_report(grcov_path, "coveralls", args.src_dir, output, artifact_paths)

        with open(output, "r") as f:
            report = json.load(f)

        # Count coverable lines (entries that are not None) and covered lines
        # (entries with a hit count greater than zero).
        total_lines = 0
        total_lines_covered = 0
        for sf in report["source_files"]:
            for c in sf["coverage"]:
                if c is None:
                    continue
                total_lines += 1
                if c > 0:
                    total_lines_covered += 1

        print("Coverable lines: {}".format(total_lines))
        print("Covered lines: {}".format(total_lines_covered))
        # Scale by 100 (guarding against an empty report) so the printed value
        # matches the "percentage" label.
        percentage = 100.0 * total_lines_covered / total_lines if total_lines else 0.0
        print("Coverage percentage: {}".format(percentage))
    else:
        generate_report(
            grcov_path, "html", args.src_dir, args.output_dir, artifact_paths
        )

        if is_taskcluster_loaner():
            upload_html_report(args.output_dir)
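
# Assumed entry point, not shown in this excerpt: the script is typically run
# directly, so a standard guard like the one below would invoke main().
if __name__ == "__main__":
    main()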