# utils/consolidated_test_report.py
import glob
import os
import re


def consolidate_reports(reports_dir):
"""Consolidate test reports from multiple test runs, including from subdirectories."""
# Get all stats files, including those in subdirectories
stats_files = glob.glob(f"{reports_dir}/**/*_stats.txt", recursive=True)
results = {}
total_stats = {"tests": 0, "passed": 0, "failed": 0, "skipped": 0}
# Collect all slow tests across all test suites
all_slow_tests = []
# Process each stats file and its corresponding failures file
for stats_file in stats_files:
# Extract test suite name from filename (e.g., tests_pipeline_allegro_cuda_stats.txt -> pipeline_allegro_cuda)
base_name = os.path.basename(stats_file).replace("_stats.txt", "")
# Include parent directory in suite name if it's in a subdirectory
rel_path = os.path.relpath(os.path.dirname(stats_file), reports_dir)
if rel_path and rel_path != ".":
# Remove 'test_reports' suffix from directory name if present
dir_name = os.path.basename(rel_path)
if dir_name.endswith("_test_reports"):
dir_name = dir_name[:-13] # Remove '_test_reports' suffix
base_name = f"{dir_name}/{base_name}"
# Parse stats
stats = parse_stats_file(stats_file)
# If no slowest tests found in stats file, try the durations file directly
if not stats.get("slowest_tests"):
stats["slowest_tests"] = parse_durations_file(stats_file)
# Update total stats
for key in ["tests", "passed", "failed", "skipped"]:
total_stats[key] += stats[key]
# Collect slowest tests with their suite name
for slow_test in stats.get("slowest_tests", []):
all_slow_tests.append({"test": slow_test["test"], "duration": slow_test["duration"], "suite": base_name})
# Parse failures if there are any
failures = []
if stats["failed"] > 0:
# First try to get test paths from summary_short.txt which has the best format
summary_file = stats_file.replace("_stats.txt", "_summary_short.txt")
if os.path.exists(summary_file):
try:
with open(summary_file, "r") as f:
content = f.read()
# Look for full lines with test path and error message: "FAILED test_path - error_msg"
failed_test_lines = re.findall(
r"FAILED\s+(tests/[\w/]+\.py::[A-Za-z0-9_\.]+::[A-Za-z0-9_]+)(?:\s+-\s+(.+))?", content
)
if failed_test_lines:
for match in failed_test_lines:
test_path = match[0]
error_msg = match[1] if len(match) > 1 and match[1] else "No error message"
failures.append({"test": test_path, "error": error_msg})
except Exception as e:
print(f"Error parsing summary file: {e}")
# If no failures found in summary, try other failure files
if not failures:
failure_patterns = ["_failures_short.txt", "_failures.txt", "_failures_line.txt", "_failures_long.txt"]
for pattern in failure_patterns:
failures_file = stats_file.replace("_stats.txt", pattern)
if os.path.exists(failures_file):
failures = parse_failures_file(failures_file)
if failures:
break
# No debug output needed
# Store results for this test suite
results[base_name] = {"stats": stats, "failures": failures}
# Filter out entries with "secs were omitted"
filtered_slow_tests = [test for test in all_slow_tests if "secs were omitted" not in test["test"]]
# Sort all slow tests by duration (descending)
filtered_slow_tests.sort(key=lambda x: x["duration"], reverse=True)
# Get the number of slowest tests to show from environment variable or default to 10
num_slowest_tests = int(os.environ.get("SHOW_SLOWEST_TESTS", "10"))
top_slowest_tests = filtered_slow_tests[:num_slowest_tests] if filtered_slow_tests else []
# Calculate additional duration statistics
total_duration = sum(test["duration"] for test in all_slow_tests)
# Calculate duration per suite
suite_durations = {}
for test in all_slow_tests:
suite_name = test["suite"]
if suite_name not in suite_durations:
suite_durations[suite_name] = 0
suite_durations[suite_name] += test["duration"]
# Removed duration categories
return {
"total_stats": total_stats,
"test_suites": results,
"slowest_tests": top_slowest_tests,
"duration_stats": {"total_duration": total_duration, "suite_durations": suite_durations},
}
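

# Minimal usage sketch (not part of the original module), assuming the file can be run
# directly: pass a reports directory on the command line and pretty-print the
# consolidated dictionary returned above. The default "reports" path and the JSON
# output are illustrative assumptions, not the project's actual CLI.
if __name__ == "__main__":
    import json
    import sys

    # Hypothetical default directory if no argument is supplied (assumption).
    target_dir = sys.argv[1] if len(sys.argv) > 1 else "reports"
    print(json.dumps(consolidate_reports(target_dir), indent=2))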