def get_model_performance_data()

in src/responsibleai/rai_analyse/_score_card/_rai_insight_data.py
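
Builds the payload used to render the model performance section of the
score card: raw predictions and ground truth, a task-specific set of
computed metrics, and (for classification) binary confusion-matrix counts.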


    # `skm` is sklearn.metrics; `get_metric` is the metric-dispatch helper
    # available at module scope in the source file.
    def get_model_performance_data(self):
        y_pred = self.data.get_y_pred()
        y_test = self.data.get_y_test()
        return_data = {"y_pred": y_pred, "y_test": y_test, "metrics": {}}

        # Start from the user-configured metrics; each task branch below
        # replaces this with the fixed set reported on the score card.
        report_metrics = self.config["Metrics"]

        if self.tasktype == "regression":
            # Signed per-row residual: prediction minus ground truth.
            return_data["y_error"] = [p - t for p, t in zip(y_pred, y_test)]
            report_metrics = ["mean_absolute_error", "mean_squared_error", "r2_score"]

            for metric in report_metrics:
                return_data["metrics"][metric] = get_metric(
                    metric, y_test, y_pred, **self.get_metric_kwargs()
                )

        if self.tasktype == "classification":
            tn, fp, fn, tp = skm.confusion_matrix(
                y_pred, y_test, labels=self.classes
            ).ravel()
            return_data["confusion_matrix"] = {"tn": tn, "fp": fp, "fn": fn, "tp": tp}
            return_data["classes"] = self.classes
            return_data["pos_label"] = self.pos_label
            return_data["neg_label"] = next(
                iter(set(self.classes) - set([self.pos_label]))
            )
            report_metrics = [
                "accuracy_score",
                "recall_score",
                "precision_score",
                "false_negative",
                "false_positive",
            ]

            # Same dispatch as the regression branch, over the
            # classification metric set.
            for metric in report_metrics:
                return_data["metrics"][metric] = get_metric(
                    metric, y_test, y_pred, **self.get_metric_kwargs()
                )

        return_data["user_requested_metrics"] = self.config["Metrics"]
        return return_data
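
A minimal sketch of how the returned payload might be consumed when
rendering the performance section; `insight_data` is a hypothetical
instance of the class defining this method, not a name from the source:

    # Hypothetical usage: `insight_data` stands in for an instance of the
    # class that defines get_model_performance_data().
    perf = insight_data.get_model_performance_data()

    print("user requested:", perf["user_requested_metrics"])
    for name, value in perf["metrics"].items():
        print(name, value)

    # Classification payloads also carry binary confusion-matrix counts.
    if "confusion_matrix" in perf:
        cm = perf["confusion_matrix"]
        print("tp", cm["tp"], "fp", cm["fp"], "fn", cm["fn"], "tn", cm["tn"])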