in modules/testrail_integration.py [0:0]
def organize_entries(testrail_session: TestRail, expected_plan: dict, suite_info: dict):
"""
When we get to the level of entries on a TestRail plan, we need to make sure:
* the entry exists or is created
* a run matching the current config / platform exists or is created
* the test cases we care about are on that run or are added
* test results are batched by run and result type (passed, skipped, failed)
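
    Returns {} if the matching run is already completed; otherwise a dict of
    the shape {"project_id": ..., <status>: {<run_id>: [{"suite_id": ...,
    "test_case": ...}, ...]}} with a bucket for each of passed / failed /
    xfailed / skipped.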
"""
    # Suite and milestone info
    suite_id = suite_info.get("id")
    suite_description = suite_info.get("description")
    milestone_id = suite_info.get("milestone_id")
    # Config
    config = suite_info.get("config")
    config_id = suite_info.get("config_id")
    # Cases and results
    cases_in_suite = [int(n) for n in suite_info.get("cases", [])]
    results = suite_info.get("results")
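    # Find this suite's entry on the plan; a plan entry groups the suite's runs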
    plan_title = expected_plan.get("name")
    suite_entries = [
        entry
        for entry in expected_plan.get("entries")
        if entry.get("suite_id") == suite_id
    ]
    plan_id = expected_plan.get("id")
    # Add a missing entry to the plan
    if not suite_entries:
        # If no entry exists, create one for the suite
        logging.info(f"Creating entry in plan {plan_id} for suite {suite_id}")
        logging.info(f"cases: {cases_in_suite}")
        entry = testrail_session.create_new_plan_entry(
            plan_id=plan_id,
            suite_id=suite_id,
            name=suite_description,
            description="Automation-generated test plan entry",
            case_ids=cases_in_suite,
        )
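        # Re-fetch the plan so the new entry shows up in its entries list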
        expected_plan = testrail_session.matching_plan_in_milestone(
            TESTRAIL_FX_DESK_PRJ, milestone_id, plan_title
        )
        suite_entries = [
            entry
            for entry in expected_plan.get("entries")
            if entry.get("suite_id") == suite_id
        ]
    # There should only be one entry per suite per plan
    if len(suite_entries) != 1:
        logging.warning(
            f"Expected one entry for suite {suite_id} on plan {plan_id}, "
            f"found {len(suite_entries)}"
        )
    # Check that this entry has a run with the correct config,
    # and if not, make that run
    entry = suite_entries[0]
    config_runs = [run for run in entry.get("runs") if run.get("config") == config]
    logging.info(f"config runs: {config_runs}")
    if not config_runs:
        expected_plan = testrail_session.create_test_run_on_plan_entry(
            plan_id,
            entry.get("id"),
            [config_id],
            description=f"Auto test plan entry: {suite_description}",
            case_ids=cases_in_suite,
        )
        suite_entries = [
            entry
            for entry in expected_plan.get("entries")
            if entry.get("suite_id") == suite_id
        ]
        entry = suite_entries[0]
        logging.info(f"new entry: {entry}")
        config_runs = [run for run in entry.get("runs") if run.get("config") == config]
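    # Fetch the full record for the (single) run matching this config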
    run = testrail_session.get_run(config_runs[0].get("id"))
    # If the run is missing cases, add them
    run_cases = [
        t.get("case_id") for t in testrail_session.get_test_results(run.get("id"))
    ]
    if run_cases:
        expected_case_ids = list(set(run_cases + cases_in_suite))
        if len(expected_case_ids) > len(run_cases):
            testrail_session.update_run_in_entry(
                run.get("id"), case_ids=expected_case_ids, include_all=False
            )
            run = testrail_session.get_run(config_runs[0].get("id"))
            run_cases = [
                t.get("case_id")
                for t in testrail_session.get_test_results(run.get("id"))
            ]
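    # Completed runs can't accept new results, so bail out with nothing to batch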
if run.get("is_completed"):
logging.info(f"Run {run.get('id')} is already completed.")
return {}
run_id = run.get("id")
    # Gather the test results by category of result
    passkey = {
        "passed": ["passed", "xpassed", "warnings"],
        "failed": ["failed", "error"],
        "xfailed": ["xfailed"],
        "skipped": ["skipped", "deselected"],
    }
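    # Batch results per run id under each status bucket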
    test_results = {
        "project_id": TESTRAIL_FX_DESK_PRJ,
        "passed": {},
        "failed": {},
        "xfailed": {},
        "skipped": {},
    }
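    # Sort each case's outcome into its bucket; reruns are interim results, so skip them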
    for test_case, outcome in results.items():
        logging.info(f"{test_case}: {outcome}")
        if outcome == "rerun":
            logging.info("Rerun result...skipping...")
            continue
        category = next(
            (status for status, outcomes in passkey.items() if outcome in outcomes),
            None,
        )
        if category is None:
            logging.warning(f"Unrecognized outcome '{outcome}' for {test_case}")
            continue
        test_results[category].setdefault(run_id, []).append(
            {"suite_id": suite_id, "test_case": test_case}
        )
    return test_results
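
# Minimal usage sketch (hypothetical values; assumes the module-level logging,
# TestRail, and TESTRAIL_FX_DESK_PRJ imports/constants defined elsewhere in this
# file, and a TestRail() constructor signature that is not shown here):
#
#     session = TestRail()
#     plan = session.matching_plan_in_milestone(
#         TESTRAIL_FX_DESK_PRJ, milestone_id, "Nightly Smoke"
#     )
#     suite_info = {
#         "id": 42, "description": "Smoke tests", "milestone_id": milestone_id,
#         "config": "Windows 11 x64", "config_id": 7,
#         "cases": ["1001", "1002"],
#         "results": {"1001": "passed", "1002": "rerun"},
#     }
#     batched = organize_entries(session, plan, suite_info)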