def collect_changes()

in modules/testrail_integration.py


def collect_changes(testrail_session: TestRail, report):
    """
    Determine what structure needs to be built so that we can report TestRail results.
     * Construct config and plan name
     * Find the right milestone to report to
     * Find the right submilestone
     * Find the right plan to report to, or create it
     * Find the right config to attach to the run, or create it
     * Use organize_entries to create the rest of the structure and gather results
     * Return the merged results so mark_results can update the test runs
    """

    # Find milestone to attach to
    channel = os.environ.get("FX_CHANNEL") or "beta"
    channel = channel.title()
    if channel == "Release":
        raise ValueError("Release reporting currently not supported")

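    # Any test's metadata will do; we only need fx_version and machine_config.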
    metadata = None
    for test in report.get("tests"):
        if test.get("metadata"):
            metadata = test.get("metadata")
            break

    if not metadata:
        logging.error("No metadata collected. Exiting without report.")
        return False

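    # Derive the plan title from version and channel; the second regex group is the major version.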
    version_str = metadata.get("fx_version")
    plan_title = get_plan_title(version_str, channel)
    logging.info(plan_title)
    plan_match = PLAN_NAME_RE.match(plan_title)
    if not plan_match:
        raise ValueError(f"Could not parse plan title: {plan_title}")
    major = plan_match[2]
    config = metadata.get("machine_config")

    if "linux" in config.lower():
        os_name = "Linux"
        arch = ""  # default if no arch token (e.g. x86_64) appears in config
        for word in config.split(" "):
            if word.startswith("x"):
                arch = word
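        # lsb_release -d prints e.g. "Description:\tUbuntu 24.04.1 LTS";
        # keep the distro name and major.minor release ("Ubuntu 24.04").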
        release = subprocess.check_output(["lsb_release", "-d"]).decode()
        release = release.split("\t")[-1].strip()
        release = ".".join(release.split(".")[:-1])
        config = f"{os_name} {release} {arch}"

    logging.warning(f"Reporting for config: {config}")
    if not config.strip():
        raise ValueError("Config cannot be blank.")

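    # Record the chosen plan title and config in a scratch file, pipe-delimited.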
    with open(".tmp_testrail_info", "w") as fh:
        fh.write(f"{plan_title}|{config}")

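    # The top-level milestone is "Firefox <major>"; the channel submilestone lives under it.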
    major_milestone = testrail_session.matching_milestone(
        TESTRAIL_FX_DESK_PRJ, f"Firefox {major}"
    )
    logging.info(f"{channel} {major}")
    channel_milestone = testrail_session.matching_submilestone(
        major_milestone, f"{channel} {major}"
    )
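    # Devedition falls back to the Beta submilestone when no Devedition one exists.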
    if (not channel_milestone) and channel == "Devedition":
        channel_milestone = testrail_session.matching_submilestone(
            major_milestone, f"Beta {major}"
        )

    # Find plan to attach runs to, create if doesn't exist
    logging.info(f"Plan title: {plan_title}")
    if not channel_milestone:
        raise ValueError(f"No submilestone found for {channel} {major}")
    milestone_id = channel_milestone.get("id")
    expected_plan = testrail_session.matching_plan_in_milestone(
        TESTRAIL_FX_DESK_PRJ, milestone_id, plan_title
    )
    if expected_plan is None:
        logging.info(f"Create plan '{plan_title}' in milestone {milestone_id}")
        expected_plan = testrail_session.create_new_plan(
            TESTRAIL_FX_DESK_PRJ,
            plan_title,
            description="Automation-generated test plan",
            milestone_id=milestone_id,
        )
    elif expected_plan.get("is_completed"):
        logging.info(f"Plan found ({expected_plan.get('id')}) but is completed.")
        return None

    # Find or add correct config for session

    config_matches = testrail_session.matching_configs(
        TESTRAIL_FX_DESK_PRJ, CONFIG_GROUP_ID, config
    )
    if not config_matches:
        logging.info("Creating config...")
        testrail_session.add_config(CONFIG_GROUP_ID, config)
        config_matches = testrail_session.matching_configs(
            TESTRAIL_FX_DESK_PRJ, CONFIG_GROUP_ID, config
        )
    if len(config_matches) == 1:
        config_id = config_matches[0].get("id")
        logging.info(f"config id: {config_id}")
    else:
        raise ValueError(
            f"Expected exactly one matching TR config, found {len(config_matches)}: {config}"
        )

    # Find or add suite-based runs on the plan
    # Store test results for later

    last_suite_id = None
    last_description = None
    results_by_suite = {}
    full_test_results = {}
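    # Only tests whose metadata carries a suite_id can be mapped to TestRail.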
    tests = [
        test
        for test in report.get("tests")
        if "metadata" in test and "suite_id" in test.get("metadata")
    ]
    tests = sorted(tests, key=lambda item: item.get("metadata").get("suite_id"))

    # Iterate through the tests; when we finish a line of same-suite tests, gather them
    for test in tests:
        (suite_id_str, suite_description) = test.get("metadata").get("suite_id")
        try:
            suite_id = int(suite_id_str.replace("S", ""))
        except (ValueError, TypeError):
            logging.info("No suite number, not reporting...")
            continue
        test_case = test.get("metadata").get("test_case")
        try:
            int(test_case)
        except (ValueError, TypeError):
            logging.info("No test case number, not reporting...")
            continue

        outcome = test.get("outcome")
        # Tests reported as rerun are a problem -- we need to know pass/fail
        if outcome == "rerun":
            outcome = test.get("call").get("outcome")
        logging.info(f"TC: {test_case}: {outcome}")

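        # Bucket the outcome by suite; since the list is sorted, a change in
        # suite_id means the previous suite is complete and can be flushed.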
        if not results_by_suite.get(suite_id):
            results_by_suite[suite_id] = {}
        results_by_suite[suite_id][test_case] = outcome
        if suite_id != last_suite_id:
            # When we get the last test_case in a suite, add entry, run, results
            if last_suite_id:
                logging.info("n-1 run")
                cases_in_suite = list(results_by_suite[last_suite_id].keys())
                cases_in_suite = [int(n) for n in cases_in_suite]
                suite_info = {
                    "id": last_suite_id,
                    "description": last_description,
                    "milestone_id": milestone_id,
                    "config": config,
                    "config_id": config_id,
                    "cases": cases_in_suite,
                    "results": results_by_suite[last_suite_id],
                }

                full_test_results = merge_results(
                    full_test_results,
                    organize_entries(testrail_session, expected_plan, suite_info),
                )

        last_suite_id = suite_id
        last_description = suite_description

    # The loop above only flushes on a suite change, so the final suite still needs one pass.
    if last_suite_id is None:
        logging.info("No reportable tests found.")
        return full_test_results
    cases_in_suite = [int(n) for n in results_by_suite[last_suite_id]]
    suite_info = {
        "id": last_suite_id,
        "description": last_description,
        "milestone_id": milestone_id,
        "config": config,
        "config_id": config_id,
        "cases": cases_in_suite,
        "results": results_by_suite[last_suite_id],
    }

    logging.info(f"n run {last_suite_id}, {last_description}")
    full_test_results = merge_results(
        full_test_results, organize_entries(testrail_session, expected_plan, suite_info)
    )
    return full_test_results
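

Example usage (illustrative): a minimal driver for this function. The TestRail constructor arguments, the report filename, and the mark_results call signature are assumptions based on the docstring, not verified API.

import json

testrail = TestRail()  # assumed: client picks up credentials from the environment
with open("report.json") as fh:  # assumed: a pytest-JSON-style report
    report = json.load(fh)

results = collect_changes(testrail, report)
if results:
    mark_results(testrail, results)  # per the docstring, mark_results updates the runs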