def fmt_treatment_badge()

in analysis/render.py


import pandas as pd

# HSS_THRESHOLD, SS_THRESHOLD, PALE_GREY, fmt_pvalue, fmt_metric_value,
# fmt_reldiff, and fmt_badge are defined elsewhere in analysis/render.py.


def fmt_treatment_badge(row: pd.Series) -> str:
    """Format a treatment effect as a colored badge with an explanatory tooltip."""
    # Pull the fields used to classify the effect and build the tooltip.
    effect = row["TreatmentEffect"]
    pvalue = row["PValue"]
    metric_type = row["MetricType"]
    reldiff = row["RelativeDifference"]
    value = row["TreatmentMetricValue"]

    # Directional outcomes: shade the badge by strength of significance, and
    # flag classifications that disagree with the p-value.
    if effect in ["Improved", "Degraded", "Changed"]:
        if pvalue <= HSS_THRESHOLD:
            color = f"{effect}Strong"
            tooltip_stat = "Highly statistically significant"
        elif pvalue <= SS_THRESHOLD:
            color = f"{effect}Weak"
            tooltip_stat = "Marginally statistically significant"
        else:
            color = "Warning"
            tooltip_stat = "Unexpected classification"
        tooltip_stat += f" (p-value: {fmt_pvalue(pvalue)})."
    elif effect == "Inconclusive":
        if pvalue > SS_THRESHOLD:
            color = effect
            tooltip_stat = "Not statistically significant"
        else:
            color = "Warning"
            tooltip_stat = "Unexpected classification"
        tooltip_stat += f" (p-value: {fmt_pvalue(pvalue)})."
    elif effect == "Too few samples":
        color = "Warning"
        tooltip_stat = "Insufficient observations to determine statistical significance"
    elif effect == "Zero samples":
        color = "Warning"
        tooltip_stat = (
            "Zero observations might indicate a problem with "
            "the metric definition or data collection"
        )
    else:
        # Unknown effect labels get a neutral badge with no stat text.
        color = PALE_GREY
        tooltip_stat = ""

    # Describe the observed metric value; for count-like metrics, note that
    # the comparison is adjusted for unequal traffic allocation.
    tooltip_value = f"Metric value = {fmt_metric_value(value, metric_type)}"
    if metric_type in ["EventCount", "UserCount", "Sum"]:
        tooltip_value += " (comparison accounts for unequal allocation)"
    tooltip_value += "."

    # Join only the non-empty tooltip parts so an empty stat line does not
    # leave a trailing newline.
    tooltip = "\n".join(s for s in (tooltip_value, tooltip_stat) if s)
    return fmt_badge(effect, fmt_reldiff(reldiff), color, tooltip)
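
Usage sketch: the snippet below exercises fmt_treatment_badge on a hand-built
pd.Series. The threshold values, color token, and helper implementations here
are illustrative stand-ins, not the real definitions from analysis/render.py;
only the field names match what the function reads.

import pandas as pd

HSS_THRESHOLD = 0.001   # assumed cutoff for "highly significant"
SS_THRESHOLD = 0.05     # assumed cutoff for "significant"
PALE_GREY = "PaleGrey"  # assumed neutral color token

def fmt_pvalue(p: float) -> str:
    return f"{p:.3g}"

def fmt_metric_value(value: float, metric_type: str) -> str:
    return f"{value:,}"

def fmt_reldiff(reldiff: float) -> str:
    return f"{reldiff:+.2%}"

def fmt_badge(effect: str, label: str, color: str, tooltip: str) -> str:
    # Stand-in renderer; the real fmt_badge may produce different markup.
    return f'<span class="badge {color}" title="{tooltip}">{effect} {label}</span>'

row = pd.Series({
    "TreatmentEffect": "Improved",
    "PValue": 0.0004,
    "MetricType": "EventCount",
    "RelativeDifference": 0.0312,
    "TreatmentMetricValue": 12345.0,
})
print(fmt_treatment_badge(row))
# -> an "ImprovedStrong" badge whose tooltip notes the allocation adjustment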