def _get_model_performance_explanation_text()

in src/responsibleai/rai_analyse/_score_card/classification_components.py [0:0]


# NOTE: assumed imports -- the score card modules appear to build HTML with
# domonic-style element helpers; adjust if this file uses a different builder.
from domonic.html import div, h3, p


def _get_model_performance_explanation_text(metric, data):
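    """Return an HTML div explaining the given performance metric.

    metric is one of "accuracy_score", "recall_score", "precision_score",
    "false_negative" or "false_positive"; data is the score card payload
    holding "metrics", "pos_label", "neg_label", "y_test", "y_pred" and
    "confusion_matrix". Unknown metrics yield an empty div.
    """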
    # Round to a whole percentage for display (int() alone would truncate).
    score_value = str(int(round(100 * data["metrics"][metric])))
    pos_label = data["pos_label"]
    neg_label = data["neg_label"]
    y_test_size = len(data["y_test"])
    if metric == "accuracy_score":
        tp = data["confusion_matrix"]["tp"]
        tn = data["confusion_matrix"]["tn"]
        total = len(data["y_pred"])
        return div(
            h3("Accuracy"),
            p(
                "{}% of data points have the correct prediction.<br>".format(
                    score_value
                )
            ),
            p(
                "Accuracy = correct predictions / all predictions<br>"
                "= ({} + {}) / {}".format(tp, tn, total)
            ),
        )
    elif metric == "recall_score":
        return div(
            h3("{}% Recall".format(score_value)),
            p(
                '{}% of data points that are actually "{}" are correctly predicted as "{}"'.format(
                    score_value, pos_label, pos_label
                )
            ),
        )
    elif metric == "precision_score":
        return div(
            h3("{}% Precision".format(score_value)),
            p(
                '{}% of data points predicted as "{}" are actually "{}"'.format(
                    score_value, pos_label, pos_label
                )
            ),
        )
    elif metric == "false_negative":
        adjusted_score = int(round(100 * data["metrics"][metric] / y_test_size))
        return div(
            h3("{}% False Negative".format(adjusted_score)),
            p(
                '{}% of all data points were predicted as "{}" but should have been "{}"'.format(
                    adjusted_score, neg_label, pos_label
                )
            ),
        )
    elif metric == "false_positive":
        adjusted_score = int(round(100 * data["metrics"][metric] / y_test_size))
        return div(
            h3("{}% False Positive".format(adjusted_score)),
            p(
                '{}% of all data points were predicted as "{}" but should have been "{}"'.format(
                    adjusted_score, pos_label, neg_label
                )
            ),
        )
    else:
        # Unrecognized metric: return an empty container.
        return div()
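

Below is a minimal usage sketch, not part of the source file: it hand-builds a
data payload with the keys the function reads (values are illustrative only)
and assumes the HTML helpers render via str(), as domonic-style elements do.

data = {
    "metrics": {"accuracy_score": 0.85},
    "pos_label": "approved",
    "neg_label": "rejected",
    "y_test": [1] * 100,   # 100 test points; only the length is used here
    "y_pred": [1] * 100,   # denominator for the accuracy formula
    "confusion_matrix": {"tp": 45, "tn": 40, "fp": 5, "fn": 10},
}
html_block = _get_model_performance_explanation_text("accuracy_score", data)
print(html_block)  # div with "85% of data points have the correct prediction..."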