in scripts/gha/summarize_test_results.py [0:0]
def summarize_logs(dir, markdown=False, github_log=False):
  """Summarize the build and test summary logs found in dir.

  Returns a (success_or_only_flakiness, log_summary) tuple, where log_summary
  is None if there is nothing to report.
  """
  build_log_files = glob.glob(os.path.join(dir, BUILD_FILE_PATTERN))
  test_log_files = glob.glob(os.path.join(dir, TEST_FILE_PATTERN))
  # Replace the "*" in the file glob with a regex capture group,
  # so we can report the test name.
  build_log_name_re = re.escape(
      os.path.join(dir, BUILD_FILE_PATTERN)).replace("\\*", "(.*)")
  test_log_name_re = re.escape(
      os.path.join(dir, TEST_FILE_PATTERN)).replace("\\*", "(.*)")
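  # Example (hypothetical pattern value, for illustration only): if
  # BUILD_FILE_PATTERN were "build-results-*.log", the escaped pattern's "\*"
  # would be replaced to give something like r"build-results-(.*)\.log", so
  # group(1) captures the per-config portion of each matching filename for
  # get_configs_from_file_name() to parse.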
  success_or_only_flakiness = True
  log_data = {}
  # log_data format:
  #   { testapp: {"build": [configs],
  #               "test": {"errors": [configs],
  #                        "failures": [configs],
  #                        "flakiness": [configs]}}}
all_tested_configs = { "build_configs": [], "test_configs": []}
for build_log_file in build_log_files:
configs = get_configs_from_file_name(build_log_file, build_log_name_re)
all_tested_configs["build_configs"].append(configs)
with open(build_log_file, "r") as log_reader:
log_text = log_reader.read()
if "__SUMMARY_MISSING__" in log_text:
success_or_only_flakiness = False
log_data.setdefault(MISSING_LOG, {}).setdefault("build", []).append(configs)
else:
log_reader_data = json.loads(log_text)
for (testapp, _) in log_reader_data["errors"].items():
success_or_only_flakiness = False
log_data.setdefault(testapp, {}).setdefault("build", []).append(configs)
  for test_log_file in test_log_files:
    configs = get_configs_from_file_name(test_log_file, test_log_name_re)
    all_tested_configs["test_configs"].append(configs)
    with open(test_log_file, "r") as log_reader:
      log_text = log_reader.read()
      if "__SUMMARY_MISSING__" in log_text:
        success_or_only_flakiness = False
        log_data.setdefault(MISSING_LOG, {}).setdefault("test", {}).setdefault("errors", []).append(configs)
      else:
        log_reader_data = json.loads(log_text)
        # logging.info("log_reader_data: %s", log_reader_data)
        for (testapp, _) in log_reader_data["errors"].items():
          success_or_only_flakiness = False
          log_data.setdefault(testapp, {}).setdefault("test", {}).setdefault("errors", []).append(configs)
        for (testapp, failures) in log_reader_data["failures"].items():
          # A genuine (non-flaky) test failure means this was not a clean run.
          success_or_only_flakiness = False
          log_data.setdefault(testapp, {}).setdefault("test", {}).setdefault("failures", []).append(configs)
          # Previous per-test grouping, kept for reference:
          # for (test, _) in failures["failed_tests"].items():
          #   success_or_only_flakiness = False
          #   log_data.setdefault(testapp, {}).setdefault("test", {}).setdefault("failures", {}).setdefault(test, []).append(configs)
        for (testapp, flakiness) in log_reader_data["flakiness"].items():
          log_data.setdefault(testapp, {}).setdefault("test", {}).setdefault("flakiness", []).append(configs)
          # Previous per-test grouping, kept for reference:
          # if flakiness["flaky_tests"].items():
          #   for (test, _) in flakiness["flaky_tests"].items():
          #     log_data.setdefault(testapp, {}).setdefault("test", {}).setdefault("flakiness", {}).setdefault(test, []).append(configs)
          # else:
          #   log_data.setdefault(testapp, {}).setdefault("test", {}).setdefault("flakiness", {}).setdefault("CRASH/TIMEOUT", []).append(configs)
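  # The test summary JSON consumed above has "errors", "failures", and
  # "flakiness" maps keyed by testapp; judging from the per-test code kept as
  # comments, failure/flakiness entries also carry "failed_tests"/"flaky_tests"
  # maps. A hypothetical file might look like:
  #   {"errors": {},
  #    "failures": {"firestore": {"failed_tests": {"TestWrite": "..."}}},
  #    "flakiness": {"auth": {"flaky_tests": {"TestSignIn": "..."}}}}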
logging.info("all_tested_configs: %s", all_tested_configs)
if success_or_only_flakiness and not log_data:
# No failures and no flakiness occurred, nothing to log.
return (success_or_only_flakiness, None)
# if failures (include flakiness) exist:
# log_results format:
# { testapps: {configs: [failed tests]} }
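  # Hypothetical example (the config labels are whatever reorganize_log()
  # produces, so the strings below are illustrative only):
  #   { "firestore": {"Android on macos": ["TestWrite"]} }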
  all_tested_configs = reorganize_all_tested_configs(all_tested_configs)
  logging.info("reorganized all_tested_configs: %s", all_tested_configs)
  log_results = reorganize_log(log_data, all_tested_configs)
  log_lines = []
  if markdown:
    # If outputting Markdown, don't bother justifying the table.
    log_lines = print_markdown_table(log_results)
  elif github_log:
    log_lines = print_github_log(log_results)
  else:
    log_lines = print_log(log_results)
  log_summary = "\n".join(log_lines)
  print(log_summary)
  return (success_or_only_flakiness, log_summary)
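
# Example usage (illustrative; the directory name and flag are assumptions,
# not values taken from this repository's workflows):
#
#   success, summary = summarize_logs("test_results", markdown=True)
#   if not success:
#     sys.exit(1)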