# scripts/generate_landings_risk_report.py
def notification(days: int) -> None:
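"""Build and send per-team quality report emails.

Aggregates landings, regressions, crashes, intermittent failures, patch
coverage and review metrics per team, renders them as markdown, and sends
the result through SendGrid. `days` bounds the carryover-regression window.
"""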
with open("landings_by_date.json", "r") as f:
data = json.load(f)
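# Flatten the per-date lists in "summaries" into a single list of bug summaries.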
bug_summaries: list[dict] = sum(data["summaries"].values(), [])
feature_meta_bugs = {
meta_bug["id"]: meta_bug["summary"] for meta_bug in data["featureMetaBugs"]
}
bug_summary_ids = {bug_summary["id"] for bug_summary in bug_summaries}
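# Union of the bugs attached to release and nightly crash signatures.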
with open("crash_signatures.json", "r") as f:
crash_signatures = json.load(f)
all_crash_bugs = set(
get_crash_bugs(crash_signatures["release"])
+ get_crash_bugs(crash_signatures["nightly"])
)
with open("component_test_stats.json", "r") as f:
component_test_stats = json.load(f)
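# Collect bugs behind intermittent test failures from the past week,
# restricted to components owned by an actual team.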
all_intermittent_failure_bugs: set[int] = set()
component_team_mapping = get_component_team_mapping()
for product_component, day_to_data in component_test_stats.items():
product, component = product_component.split("::", 1)
cur_team = component_team_mapping.get(product, {}).get(component)
if cur_team is None or cur_team in ("Other", "Mozilla"):
continue
for day, data in day_to_data.items():
if "bugs" not in data:
continue
if dateutil.parser.parse(day) < datetime.utcnow() - relativedelta(weeks=1):
continue
all_intermittent_failure_bugs.update(bug["id"] for bug in data["bugs"])
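# Fetch the full records for the bugs referenced above, plus all S1 bugs and
# S2 regressions (open ones are attributed to teams further down).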
all_s1_s2_bugs = []
bug_map = {}
for b in bugzilla.get_bugs():
if (
b["id"] in bug_summary_ids
or b["id"] in all_crash_bugs
or b["id"] in all_intermittent_failure_bugs
or b["severity"] == "S1"
or ("regression" in b["keywords"] and b["severity"] == "S2")
):
bug_map[b["id"]] = b
if b["severity"] == "S1" or (
"regression" in b["keywords"] and b["severity"] == "S2"
):
all_s1_s2_bugs.append(b)
all_teams = {bug["team"] for bug in bug_summaries if bug["team"] is not None}
# discard() instead of remove(): the pseudo-teams may be absent from the data.
all_teams.discard("Other")
all_teams.discard("Mozilla")
tracking_flag_re = re.compile("cf_tracking_firefox([0-9]+)")
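# Firefox versions for which a bug is tracked (tracking flag "+" or "blocking").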
def get_tracking_info(bug):
versions = []
for flag, value in bug.items():
if value not in ("+", "blocking"):
continue
match = tracking_flag_re.search(flag)
if match is None:
continue
versions.append(match.group(1))
return versions
versions = libmozdata.versions.get(base=True)
nightly_ver = versions["nightly"]
beta_ver = versions["beta"]
release_ver = versions["release"]
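# Initialize the per-team accumulators for every metric in the report.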
team_data: dict[str, Any] = {}
for team in all_teams:
cur_team_data = team_data[team] = {}
cur_team_data["month_changes"] = 0
cur_team_data["high_risk_changes"] = 0
cur_team_data["medium_risk_changes"] = 0
cur_team_data["low_risk_changes"] = 0
cur_team_data["prev_high_risk_changes"] = 0
cur_team_data["new_regressions"] = 0
cur_team_data["fixed_new_regressions"] = 0
cur_team_data["unassigned_new_regressions"] = 0
cur_team_data["new_crash_regressions"] = 0
cur_team_data["week_old_fixed_regressions"] = 0
cur_team_data["month_old_fixed_regressions"] = 0
cur_team_data["more_than_month_old_fixed_regressions"] = 0
cur_team_data["unfixed_regressions"] = []
cur_team_data["fix_times"] = []
cur_team_data["prev_fix_times"] = []
cur_team_data["patch_coverage_covered"] = 0
cur_team_data["patch_coverage_added"] = 0
cur_team_data["coverage_patches"] = []
cur_team_data["open_review_times"] = []
cur_team_data["review_times"] = []
cur_team_data["prev_review_times"] = []
cur_team_data["intermittent_failures"] = collections.defaultdict(int)
cur_team_data["skipped_tests"] = 0
cur_team_data["prev_skipped_tests"] = 0
cur_team_data["carryover_regressions"] = 0
cur_team_data["affecting_carryover_regressions"] = []
cur_team_data["s1_bugs"] = []
cur_team_data["s2_bugs"] = []
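# Main accumulation pass over the bug summaries.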
for bug in bug_summaries:
if bug["team"] in (None, "Other", "Mozilla"):
continue
cur_team_data = team_data[bug["team"]]
creation_date = dateutil.parser.parse(bug["creation_date"])
fix_date = (
dateutil.parser.parse(bug["date"]) if bug["date"] is not None else None
)
if not bug["fixed"]:
for revision in bug["revisions"]:
if revision["pending_review_time"] is None:
continue
cur_team_data["open_review_times"].append(
(revision["pending_review_time"], revision["id"])
)
if fix_date is not None and fix_date > datetime.utcnow() - relativedelta(
months=1
):
cur_team_data["month_changes"] += 1
if fix_date > datetime.utcnow() - relativedelta(weeks=1):
cur_team_data["fix_times"].append(
(fix_date - creation_date).total_seconds() / 86400
)
if bug["risk_band"] == "h":
cur_team_data["high_risk_changes"] += 1
elif bug["risk_band"] == "m":
cur_team_data["medium_risk_changes"] += 1
elif bug["risk_band"] == "l":
cur_team_data["low_risk_changes"] += 1
if bug["regression"]:
if creation_date > datetime.utcnow() - relativedelta(weeks=1):
cur_team_data["week_old_fixed_regressions"] += 1
elif creation_date > datetime.utcnow() - relativedelta(months=1):
cur_team_data["month_old_fixed_regressions"] += 1
else:
cur_team_data["more_than_month_old_fixed_regressions"] += 1
cur_team_data["review_times"] += [
revision["first_review_time"]
for revision in bug["revisions"]
if revision["first_review_time"] is not None
]
for commit in bug["commits"]:
if commit["backedout"]:
continue
# We don't care about old commits associated with newly fixed bugs (e.g. a tentative fix from a year ago).
if dateutil.parser.parse(
commit["date"]
) < datetime.utcnow() - relativedelta(weeks=1):
continue
lines_added = 0
lines_covered = 0
if commit["coverage"] and commit["coverage"][0] is not None:
lines_added += commit["coverage"][0]
# Consider unknown as covered.
lines_covered += commit["coverage"][1] + commit["coverage"][2]
cur_team_data["patch_coverage_added"] += lines_added
cur_team_data["patch_coverage_covered"] += lines_covered
if lines_added != 0:
cur_team_data["coverage_patches"].append(
(lines_covered / lines_added, commit["rev_id"])
)
elif fix_date > datetime.utcnow() - relativedelta(weeks=2):
cur_team_data["prev_fix_times"].append(
(fix_date - creation_date).total_seconds() / 86400
)
if bug["risk_band"] == "h":
cur_team_data["prev_high_risk_changes"] += 1
cur_team_data["prev_review_times"] += [
revision["first_review_time"]
for revision in bug["revisions"]
if revision["first_review_time"] is not None
]
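# Regressions filed in the past two weeks count as new, regardless of the
# fix-date buckets above.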
if bug["regression"] and creation_date > datetime.utcnow() - relativedelta(
weeks=2
):
cur_team_data["new_regressions"] += 1
if bug["team"] == "Compiler and Development Tools":
print("New regression: {}".format(bug["id"]))
if "crash" in bug["types"]:
cur_team_data["new_crash_regressions"] += 1
if bug["fixed"]:
cur_team_data["fixed_new_regressions"] += 1
elif bug["assignee"] is None:
cur_team_data["unassigned_new_regressions"] += 1
if creation_date > datetime.utcnow() - relativedelta(weeks=2):
if bug["regression"] and not bug["fixed"]:
if bug["team"] == "Compiler and Development Tools":
print("Unfixed regression: {}".format(bug["id"]))
cur_team_data["unfixed_regressions"].append(bug)
if creation_date > datetime.utcnow() - relativedelta(days=days):
if bug["regression"] and not bug["fixed"]:
cur_team_data["carryover_regressions"] += 1
if bug["team"] == "DOM":
carrytest.add(bug["id"])
full_bug = bug_map[bug["id"]]
if (
"stalled" not in full_bug["keywords"]
and "intermittent-failure" not in full_bug["keywords"]
):
for version in [nightly_ver, beta_ver, release_ver]:
if (
f"cf_status_firefox{version}" in full_bug
and full_bug[f"cf_status_firefox{version}"] == "affected"
and (
f"cf_tracking_firefox{version}" not in full_bug
or full_bug[f"cf_tracking_firefox{version}"] != "-"
)
and f"cf_status_firefox{version - 1}" in full_bug
and full_bug[f"cf_status_firefox{version - 1}"]
not in ("unaffected", "?", "---")
):
cur_team_data["affecting_carryover_regressions"].append(bug)
break
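# Attribute open S1 bugs and S2 regressions to their owning teams.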
for bug in all_s1_s2_bugs:
if bug["status"] in ("VERIFIED", "RESOLVED"):
continue
team = component_team_mapping.get(bug["product"], {}).get(bug["component"])
if team in team_data:
if bug["severity"] == "S1":
team_data[team]["s1_bugs"].append(bug)
elif bug["severity"] == "S2":
team_data[team]["s2_bugs"].append(bug)
for product_component, day_to_data in component_test_stats.items():
product, component = product_component.split("::", 1)
team = component_team_mapping.get(product, {}).get(component)
if team is None or team in ("Other", "Mozilla") or team not in team_data:
continue
cur_team_data = team_data[team]
for day, data in day_to_data.items():
if "bugs" not in data:
continue
if dateutil.parser.parse(day) < datetime.utcnow() - relativedelta(weeks=1):
continue
for bug in data["bugs"]:
cur_team_data["intermittent_failures"][bug["id"]] += bug["count"]
today = datetime.utcnow().strftime("%Y-%m-%d")
two_weeks_ago = (datetime.utcnow() - timedelta(days=14)).strftime("%Y-%m-%d")
skips = 0
prev_skips = 0
for day, data in day_to_data.items():
if "skips" not in data:
continue
if day == today:
skips = data["skips"]
elif day == two_weeks_ago:
prev_skips = data["skips"]
cur_team_data["skipped_tests"] += skips
cur_team_data["prev_skipped_tests"] += prev_skips
total_carryover_regressions = sum(
cur_team_data["carryover_regressions"] for cur_team_data in team_data.values()
)
print("New regressions")
print(
{
cur_team: cur_team_data["new_regressions"]
for cur_team, cur_team_data in team_data.items()
}
)
print("Month changes:")
print(
{
cur_team: cur_team_data["month_changes"]
for cur_team, cur_team_data in team_data.items()
}
)
print("Rates:")
print(
{
cur_team: cur_team_data["new_regressions"] / cur_team_data["month_changes"]
if cur_team_data["month_changes"] != 0
else 0.0
for cur_team, cur_team_data in team_data.items()
}
)
total_covered = sum(
cur_team_data["patch_coverage_covered"] for cur_team_data in team_data.values()
)
total_added = sum(
cur_team_data["patch_coverage_added"] for cur_team_data in team_data.values()
)
average_patch_coverage = round(
100 * total_covered / total_added if total_added != 0 else 100,
1,
)
average_median_fix_time = statistics.mean(
statistics.median(cur_team_data["fix_times"])
for cur_team_data in team_data.values()
if len(cur_team_data["fix_times"]) > 0
)
all_intermittent_failures: dict[int, int] = collections.defaultdict(int)
for cur_team_data in team_data.values():
for bug_id, count in cur_team_data["intermittent_failures"].items():
all_intermittent_failures[bug_id] += count
sorted_intermittent_failures = sorted(
all_intermittent_failures.items(), key=lambda x: -x[1]
)
intermittent_failure_positions = {
bug_id: i + 1 for i, (bug_id, count) in enumerate(sorted_intermittent_failures)
}
median_skipped_tests = statistics.median(
cur_team_data["skipped_tests"] for cur_team_data in team_data.values()
)
average_median_first_review_time = statistics.mean(
statistics.median(cur_team_data["review_times"])
for cur_team_data in team_data.values()
if len(cur_team_data["review_times"]) > 0
)
revisions_without_reviewer = set(
revision["id"]
for revision in phabricator.get_revisions()
if len(revision["attachments"]["reviewers"]["reviewers"]) == 0
)
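# Render one markdown table row (|Bug|Last Activity|Assignment|Notes|) for a bug.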
def regression_to_text(bug):
full_bug = bug_map[bug["id"]]
tracked_versions = " and ".join(sorted(get_tracking_info(full_bug)))
blocked_features = (
", ".join(f"'{feature_meta_bugs[meta_id]}'" for meta_id in bug["meta_ids"])
if "meta_ids" in bug
else ""
)
assignment = (
"Assigned"
if full_bug["assigned_to"] != "nobody@mozilla.org"
else "**Unassigned**"
)
hours = math.ceil(
(
datetime.now(timezone.utc)
- dateutil.parser.parse(
bugzilla.get_last_activity_excluding_bots(full_bug)
)
).total_seconds()
/ 3600
)
last_activity = (
f"**{math.ceil(hours / 24)} days ago**"
if hours >= 120
else f"{math.ceil(hours / 24)} days ago"
if hours >= 24
else f"{hours} hours ago"
)
notes = []
if full_bug["severity"] in ("S1", "S2"):
notes.append("**Severity {}**".format(full_bug["severity"]))
pending_needinfos = []
for flag in full_bug["flags"]:
if flag["name"] == "needinfo" and flag["status"] == "?":
pending_needinfos.append(
(
datetime.now(timezone.utc)
- dateutil.parser.parse(flag["creation_date"])
).total_seconds()
/ 86400
)
if len(pending_needinfos) > 0:
days = math.ceil(max(pending_needinfos))
days_str = f"{days} days" if days < 3 else f"**{days} days**"
notes.append(f"{len(pending_needinfos)} needinfo pending for {days_str}")
if full_bug["priority"] == "--":
days = math.ceil(
(
datetime.now(timezone.utc)
- dateutil.parser.parse(full_bug["creation_time"])
).total_seconds()
/ 86400
)
days_str = f"{days} days" if days < 3 else f"**{days} days**"
notes.append(f"No priority set for {days_str}")
if full_bug["severity"] == "--":
days = math.ceil(
(
datetime.now(timezone.utc)
- dateutil.parser.parse(full_bug["creation_time"])
).total_seconds()
/ 86400
)
days_str = f"{days} days" if days < 3 else f"**{days} days**"
notes.append(f"No severity set for {days_str}")
if tracked_versions:
notes.append(f"Tracked for {tracked_versions}")
if blocked_features:
notes.append(f"Blocking {blocked_features}")
return (
"|[{}](https://bugzilla.mozilla.org/show_bug.cgi?id={})|{}|{}|{}|".format(
escape_markdown(
html.escape(
textwrap.shorten(
"Bug {} - {}".format(bug["id"], full_bug["summary"]),
width=98,
placeholder="…",
)
)
),
bug["id"],
last_activity,
assignment,
", ".join(notes),
)
)
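# Markdown table of the team's top 10 recent crashes on the given channel,
# or None if the team owns no bug for any of them.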
def get_top_crashes(team: str, channel: str) -> str | None:
top_crashes = []
if team in super_teams:
teams = set(super_teams[team])
else:
teams = {team}
for signature, data in crash_signatures[channel].items():
bugs = [
bug
for bug in data["bugs"]
if bug["resolution"] == ""
and component_team_mapping.get(bug["product"], {}).get(bug["component"])
in teams
]
if len(bugs) == 0:
continue
top_crashes.append(
"|[{}](https://crash-stats.mozilla.org/signature/?product=Firefox&{}) ({}#{} globally{})|{}{}".format(
escape_markdown(html.escape(signature)),
urllib.parse.urlencode({"signature": signature}),
"**" if data["tc_rank"] <= 50 else "",
data["tc_rank"],
"**" if data["tc_rank"] <= 50 else "",
data["crash_count"],
regression_to_text(bugs[0]),
)
)
if len(top_crashes) == 0:
return None
top_crashes_text = "\n".join(top_crashes[:10])
return f"|Signature|# of crashes|Bug|Last Activity|Assignment|Notes|\n|---|---|---|---|---|---|\n{top_crashes_text}"
failure = False
send_grid_client = sendgrid.SendGridAPIClient(
api_key=get_secret("SENDGRID_API_KEY")
)
style = """<style>
table, td, th {
border: 1px solid black;
}
table {
width: 100%;
border-collapse: collapse;
}
</style>"""
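# Merge member-team metrics into their super team: dict metrics are merged,
# numeric and list metrics are summed/concatenated.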
super_teams = json.loads(get_secret("SUPER_TEAMS"))
for super_team, teams in super_teams.items():
assert super_team not in team_data
team_data[super_team] = {}
for team in teams:
for key, value in team_data[team].items():
if key not in team_data[super_team]:
team_data[super_team][key] = copy.deepcopy(value)
else:
if isinstance(value, dict):
team_data[super_team][key].update(value)
else:
team_data[super_team][key] += value
team_to_receivers = json.loads(get_secret("NOTIFICATION_TEAMS"))
for team, cur_team_data in team_data.items():
if team not in team_to_receivers:
continue
try:
month_changes = cur_team_data["month_changes"]
high_risk_changes = cur_team_data["high_risk_changes"]
predicted_regressions = round(
0.33 * cur_team_data["high_risk_changes"]
+ 0.16 * cur_team_data["medium_risk_changes"]
+ 0.06 * cur_team_data["low_risk_changes"],
1,
)
prev_high_risk_changes = cur_team_data["prev_high_risk_changes"]
new_regressions = cur_team_data["new_regressions"]
fixed_new_regressions = cur_team_data["fixed_new_regressions"]
unassigned_new_regressions = cur_team_data["unassigned_new_regressions"]
new_crash_regressions = cur_team_data["new_crash_regressions"]
carryover_regressions = cur_team_data["carryover_regressions"]
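# Estimate last week's carryover count: subtract regressions new in this
# period and add back regressions fixed during the past week.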
prev_carryover_regressions = (
cur_team_data["carryover_regressions"]
- cur_team_data["new_regressions"]
+ cur_team_data["week_old_fixed_regressions"]
+ cur_team_data["month_old_fixed_regressions"]
+ cur_team_data["more_than_month_old_fixed_regressions"]
)
# Calculate median regression rate for similarly sized teams (teams that land at least
# a fifth of the changes of the current team and less than five times the changes of the
# current team).
median_regression_rate = statistics.median(
ctd["new_regressions"] / ctd["month_changes"]
for ctd in team_data.values()
if ctd["month_changes"] > max(1, cur_team_data["month_changes"]) / 5
and ctd["month_changes"] < max(1, cur_team_data["month_changes"]) * 5
)
unfixed_regressions = "\n".join(
regression_to_text(reg)
for reg in sorted(
cur_team_data["unfixed_regressions"],
key=lambda bug: bugzilla.get_last_activity_excluding_bots(
bug_map[bug["id"]]
),
)
)
affecting_carryover_regressions = cur_team_data[
"affecting_carryover_regressions"
]
s1_bugs = cur_team_data["s1_bugs"]
s2_bugs = cur_team_data["s2_bugs"]
# Copy the list so that appending S1/S2 bugs below doesn't mutate the team's
# "affecting_carryover_regressions" list, which is checked again further down.
affecting_carryover_regressions_and_s1_s2 = list(affecting_carryover_regressions)
affecting_carryover_regressions_and_s1_s2_ids = set(
bug["id"] for bug in affecting_carryover_regressions
)
for bug in s1_bugs + s2_bugs:
if bug["id"] in affecting_carryover_regressions_and_s1_s2_ids:
continue
affecting_carryover_regressions_and_s1_s2.append(bug)
affecting_carryover_regressions_and_s1_s2_ids.add(bug["id"])
affecting_carryover_regressions_and_s1_s2_text = "\n".join(
regression_to_text(reg)
for reg in sorted(
affecting_carryover_regressions_and_s1_s2,
key=lambda bug: bugzilla.get_last_activity_excluding_bots(
bug_map[bug["id"]]
),
)
)
if len(cur_team_data["fix_times"]) > 1:
fix_time_deciles = statistics.quantiles(
cur_team_data["fix_times"], n=10, method="inclusive"
)
median_fix_time = round(fix_time_deciles[4], 1)
ninth_decile_fix_time = round(fix_time_deciles[8], 1)
elif len(cur_team_data["fix_times"]) > 0:
median_fix_time = round(cur_team_data["fix_times"][0], 1)
ninth_decile_fix_time = round(cur_team_data["fix_times"][0], 1)
else:
median_fix_time = None
ninth_decile_fix_time = None
if len(cur_team_data["prev_fix_times"]) > 0:
prev_median_fix_time = round(
statistics.median(cur_team_data["prev_fix_times"]), 1
)
else:
prev_median_fix_time = None
if median_fix_time is not None:
median_fix_time_text = f"The median time to fix for regressions fixed in the past week was {median_fix_time} days"
if team not in super_teams:
median_fix_time_text += f""" ({"**higher** than" if median_fix_time > average_median_fix_time else "lower than" if average_median_fix_time > median_fix_time else "equal to"} the average of {round(average_median_fix_time, 1)} across other teams)"""
median_fix_time_text += "."
if prev_median_fix_time is not None:
verb = (
"improving"
if median_fix_time < prev_median_fix_time
else "**worsening**"
if prev_median_fix_time < median_fix_time
else "staying constant"
)
fix_time_diff = f"This is {verb} when compared to two weeks ago (median was {prev_median_fix_time} days)."
else:
fix_time_diff = ""
else:
median_fix_time_text = ""
fix_time_diff = ""
top_intermittent_failures = "\n".join(
"{} failures ({}#{} globally{}){}".format(
count,
"**" if intermittent_failure_positions[bug_id] <= 21 else "",
intermittent_failure_positions[bug_id],
"**" if intermittent_failure_positions[bug_id] <= 21 else "",
regression_to_text(bug_map[bug_id]),
)
for bug_id, count in sorted(
cur_team_data["intermittent_failures"].items(), key=lambda x: -x[1]
)[:7]
)
skipped_tests = cur_team_data["skipped_tests"]
prev_skipped_tests = cur_team_data["prev_skipped_tests"]
patch_coverage = round(
100
* cur_team_data["patch_coverage_covered"]
/ cur_team_data["patch_coverage_added"]
if cur_team_data["patch_coverage_added"] != 0
else 100,
1,
)
low_coverage_patches = "\n".join(
"- [D{}](https://phabricator.services.mozilla.com/D{}) - {}{}%{}".format(
rev_id,
rev_id,
"**" if pc < 0.8 else "",
round(100 * pc, 1),
"**" if pc < 0.8 else "",
)
for pc, rev_id in sorted(
cur_team_data["coverage_patches"], key=lambda x: x[0]
)
if pc != 1.0
)
if len(cur_team_data["review_times"]) > 1:
review_time_deciles = statistics.quantiles(
cur_team_data["review_times"],
n=10,
method="inclusive",
)
median_first_review_time = round(review_time_deciles[4], 1)
ninth_decile_first_review_time = round(review_time_deciles[8], 1)
elif len(cur_team_data["review_times"]) > 0:
median_first_review_time = round(cur_team_data["review_times"][0], 1)
ninth_decile_first_review_time = round(
cur_team_data["review_times"][0], 1
)
else:
median_first_review_time = None
ninth_decile_first_review_time = None
if len(cur_team_data["prev_review_times"]) > 0:
prev_median_first_review_time = round(
statistics.median(cur_team_data["prev_review_times"]), 1
)
else:
prev_median_first_review_time = None
if median_first_review_time is not None:
if prev_median_first_review_time is not None:
verb = (
"improving"
if median_first_review_time < prev_median_first_review_time
else "**worsening**"
if prev_median_first_review_time < median_first_review_time
else "staying constant"
)
review_time_diff = f"This is {verb} when compared to two weeks ago (median was {prev_median_first_review_time} days)."
else:
review_time_diff = ""
median_first_review_time_text = f"The median time to first review patches for last week's fixed bugs was {median_first_review_time} days"
if team not in super_teams:
median_first_review_time_text += f""" ({"**higher** than" if median_first_review_time > average_median_first_review_time else "lower than" if average_median_first_review_time > median_first_review_time else "equal to"} the average of {round(average_median_first_review_time, 1)} across other teams)"""
median_first_review_time_text += f". {review_time_diff}"
if ninth_decile_first_review_time is not None:
median_first_review_time_text += f"\n90% of patches were first reviewed within {ninth_decile_first_review_time} days."
else:
median_first_review_time_text = ""
slow_review_patches = "\n".join(
"- [D{}](https://phabricator.services.mozilla.com/D{}) - {}{} days{}".format(
rev_id,
rev_id,
"**" if review_time >= 14 else "",
round(review_time, 1),
"**" if review_time >= 14 else "",
)
for review_time, rev_id in sorted(
cur_team_data["open_review_times"], key=lambda x: -x[0]
)
if review_time >= 3 and rev_id not in revisions_without_reviewer
)
report_url_querystring = urllib.parse.urlencode({"teams": team})
regression_rate = (
new_regressions / month_changes if month_changes > 0 else 0
)
new_regressions_section = f"""<b>NEW REGRESSIONS</b>
{new_regressions} new regressions ({new_crash_regressions} crashes) during the past two weeks. {fixed_new_regressions} of them were fixed, {unassigned_new_regressions} are still unassigned.
The regression rate (regressions from the past two weeks / changes from the past month) is {round(regression_rate, 2)}"""
if team not in super_teams:
new_regressions_section += f""" ({"**higher** than" if regression_rate > median_regression_rate else "lower than" if median_regression_rate > regression_rate else "equal to"} the median of {round(median_regression_rate, 2)} across other similarly sized teams)"""
new_regressions_section += f""".
This week your team committed {high_risk_changes} high risk[^risk] changes, {"**more** than" if high_risk_changes > prev_high_risk_changes else "fewer than" if prev_high_risk_changes > high_risk_changes else "equal to"} {prev_high_risk_changes} from last week.
Based on historical information, your past week changes are likely to cause {predicted_regressions} regressions in the future.
[^risk]: The risk associated with changes is evaluated with a machine learning model trained on historical regressions. On average, more than 1 out of 3 high risk changes causes a regression.
Unfixed regressions from the past two weeks:
|Bug|Last Activity|Assignment|Notes|
|---|---|---|---|
{unfixed_regressions}
<br />
{median_fix_time_text}
{fix_time_diff}
90% of bugs were fixed within {ninth_decile_fix_time} days."""
carryover_regressions_section_title_texts = []
if len(s1_bugs) > 0:
carryover_regressions_section_title_texts.append("S1 BUGS")
if len(s2_bugs) > 0:
carryover_regressions_section_title_texts.append("S2 REGRESSIONS")
carryover_regressions_section_title_texts.append("CARRYOVER REGRESSIONS")
carryover_regressions_section = (
"<b>"
+ " AND ".join(carryover_regressions_section_title_texts)
+ f"""</b>
<br />
There are {carryover_regressions} carryover regressions in your team out of a total of {total_carryover_regressions} in Firefox, {"**increasing**" if carryover_regressions > prev_carryover_regressions else "decreasing" if prev_carryover_regressions > carryover_regressions else "staying constant"} from the {prev_carryover_regressions} you had last week.<br /><br />"""
)
carryover_regressions_section_list_texts = []
if len(s1_bugs) > 0:
carryover_regressions_section_list_texts.append("S1 bugs[^severity]")
if len(s2_bugs) > 0:
carryover_regressions_section_list_texts.append("S2 regressions")
if len(affecting_carryover_regressions) > 0:
carryover_text = "carryover regressions which are still tracked as affecting Release, Beta or Nightly"
if len(carryover_regressions_section_list_texts) == 0:
carryover_text = carryover_text.capitalize()
carryover_regressions_section_list_texts.append(carryover_text)
if (
len(s1_bugs) > 0
or len(s2_bugs) > 0
or len(affecting_carryover_regressions) > 0
):
carryover_regressions_section += (
" and ".join(carryover_regressions_section_list_texts)
+ f""":
|Bug|Last Activity|Assignment|Notes|
|---|---|---|---|
{affecting_carryover_regressions_and_s1_s2_text}
[^severity]: Remember S1 bugs are defined as "(Catastrophic) Blocks development/testing, may impact more than 25% of users, causes data loss, potential chemspill, and no workaround available" (https://firefox-source-docs.mozilla.org/bug-mgmt/guides/severity.html). Please retriage as you see fit.
"""
)
crashes_section = """<b>CRASHES</b>
<br />
"""
top_nightly_crashes = get_top_crashes(team, "nightly")
if top_nightly_crashes is not None:
crashes_section += f"""Top recent Nightly crashes:
{top_nightly_crashes}"""
else:
crashes_section += "No crashes in the top 200 for Nightly."
top_release_crashes = get_top_crashes(team, "release")
if top_release_crashes is not None:
crashes_section += f"""
<br />Top recent Release crashes:
{top_release_crashes}"""
else:
crashes_section += "\nNo crashes in the top 200 for Release."
intermittent_failures_section = f"""<b>INTERMITTENT FAILURES</b>
<br />
Top intermittent failures from the past week:
|# of failures|Bug|Last Activity|Assignment|Notes|
|---|---|---|---|---|
{top_intermittent_failures}
There are {skipped_tests} tests skipped in some configurations"""
if team not in super_teams:
intermittent_failures_section += f""" ({"**higher** than" if skipped_tests > median_skipped_tests else "lower than" if median_skipped_tests > skipped_tests else "equal to"} the median across other teams, {round(median_skipped_tests)})"""
intermittent_failures_section += f""".
They are {"**increasing**" if skipped_tests > prev_skipped_tests else "reducing" if prev_skipped_tests > skipped_tests else "staying constant"} from {prev_skipped_tests} you had two weeks ago."""
test_coverage_section = f"""<b>TEST COVERAGE</b>
<br />
Total coverage for patches landing this past week was {patch_coverage}% ({"higher than" if patch_coverage > average_patch_coverage else "**lower** than" if average_patch_coverage > patch_coverage else "equal to"} the average across other teams, {average_patch_coverage}%)."""
if len(low_coverage_patches) > 0:
test_coverage_section += f"""<br />List of lowest coverage patches:
{low_coverage_patches}"""
review_section = f"""<b>REVIEW</b>
<br />
{median_first_review_time_text}
List of revisions that have been waiting for a review for longer than 3 days:
{slow_review_patches}"""
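# Maintenance effectiveness (ME) over a trailing window, computed via Bugzilla.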
def calculate_maintenance_effectiveness(
period: relativedelta,
) -> dict[str, dict]:
start_date = datetime.utcnow() - period
if team in super_teams:
me_teams = super_teams[team]
else:
me_teams = [team]
return bugzilla.calculate_maintenance_effectiveness_indicator(
me_teams, start_date, datetime.utcnow()
)
def format_maintenance_effectiveness(period: relativedelta) -> str:
me = calculate_maintenance_effectiveness(period)
return "ME: {}%, WeightedBurnDownTime: {} y\n[Opened bugs]({})\n[Closed bugs]({})".format(
round(me["stats"]["ME"], 2),
round(me["stats"]["WBDTime"], 2),
me["queries"]["Opened"],
me["queries"]["Closed"],
)
maintenance_effectiveness_section = f"""<b>[MAINTENANCE EFFECTIVENESS](https://docs.google.com/document/d/1y2dUDZI5U3xvY0jMY1LfIDARc5b_QB9mS2DV7MWrfa0)</b>
<br />
Last week: {format_maintenance_effectiveness(relativedelta(weeks=1))}
Last month: {format_maintenance_effectiveness(relativedelta(months=1))}
Last 3 months: {format_maintenance_effectiveness(relativedelta(months=3))}
"""
sections = [
new_regressions_section,
carryover_regressions_section,
crashes_section,
intermittent_failures_section,
test_coverage_section,
review_section,
]
maintenance_effectiveness_teams = set(
json.loads(get_secret("MAINTENANCE_EFFECTIVENESS_TEAMS"))
)
if team in maintenance_effectiveness_teams:
sections.append(maintenance_effectiveness_section)
notification_text = (
"\n\n<br />\n<br />\n".join(sections)
+ f"""
Find the full report with fancy charts at [https://changes.moz.tools/team.html?{report_url_querystring}](https://changes.moz.tools/team.html?{report_url_querystring}).
Report bugs or enhancement requests on [https://github.com/mozilla/bugbug](https://github.com/mozilla/bugbug) or to [mcastelluccio@mozilla.com](mailto:mcastelluccio@mozilla.com).
"""
)
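# Render the markdown to HTML and send it via SendGrid, with a plain-text fallback.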
receivers = team_to_receivers[team]
logger.info("Sending email to %s", team)
from_email = sendgrid.helpers.mail.From(get_secret("NOTIFICATION_SENDER"))
to_emails = [sendgrid.helpers.mail.To(receivers[0])] + [
sendgrid.helpers.mail.Cc(receiver) for receiver in receivers[1:]
]
subject = sendgrid.helpers.mail.Subject(f"Quality report for '{team}'")
html_text = markdown2.markdown(notification_text, extras=["tables", "footnotes"])
html_content = sendgrid.helpers.mail.HtmlContent(style + html_text)
# Specify the parser explicitly to avoid bs4's GuessedAtParserWarning.
soup = bs4.BeautifulSoup(html_text, features="html.parser")
plain_text = soup.get_text()
plain_text_content = sendgrid.helpers.mail.Content("text/plain", plain_text)
message = sendgrid.helpers.mail.Mail(
from_email, to_emails, subject, plain_text_content, html_content
)
response = send_grid_client.send(message=message)
logger.info("Status code: %s", response.status_code)
logger.info("Headers: %s", response.headers)
logger.info("Body: %s", response.body)
except Exception:
traceback.print_exc()
failure = True
if failure:
raise RuntimeError("There was at least one failure")