in src/responsibleai/rai_analyse/_score_card/_rai_insight_data.py [0:0]
def get_fairlearn_data(self):
    # Build the fairness section of the score card: for each sensitive feature,
    # collect grouped metric values and per-cohort statistics.
    fairness_config = self.config["Fairness"]
    fm = {}
    dataset = self.data.get_test()
    for sensitive_feature in fairness_config["sensitive_features"]:
        fm[sensitive_feature] = {}
        fm[sensitive_feature]["metrics"] = {}
        fm[sensitive_feature]["statistics"] = {}
        # Restrict reporting to the 20 most frequent values of the feature.
        topnlabels = (
            dataset[sensitive_feature].value_counts().nlargest(20).index.to_list()
        )
        for metric in fairness_config["metric"]:
            gm = self.data.get_fairlearn_grouped_metric(
                metric, sensitive_feature, self.pos_label, self.classes
            )
            # Disparity can be summarized as a between-group difference or a ratio;
            # fairness_evaluation_kind selects which one is reported.
            fm_lookup = {
                "difference": gm.difference(method="between_groups"),
                "ratio": gm.ratio(),
            }
            gmd = gm.by_group.to_dict()
            gmd = {k: gmd[k] for k in topnlabels}
            # Sort groups by metric value so the min/max groups can be read off the ends.
            sorted_group_metric = sorted(gmd.items(), key=lambda x: x[1])
            fm[sensitive_feature]["metrics"][metric] = {
                "kind": fairness_config["fairness_evaluation_kind"],
                "value": fm_lookup[fairness_config["fairness_evaluation_kind"]],
                "group_metric": OrderedDict(sorted_group_metric),
                "group_max": next(iter(sorted_group_metric[-1:]), None),
                "group_min": next(iter(sorted_group_metric[:1]), None),
            }
        # Per-cohort statistics: one cohort per observed value of the sensitive
        # feature, each tagged with a short alphabetic label for the score card.
        feature_statistics = dict(
            self.data.get_feature_statistics(sensitive_feature)
        )
        fl_short_label_generator = AlphabetLabelIterator()
        for k, _ in feature_statistics.items():
            filtermap = self.data.get_test()[sensitive_feature] == k
            cohort_data = self.data.get_cohort_data(filtermap)
            cohort_data["short_label"] = next(fl_short_label_generator)
            cohort_data["pos_label"] = self.pos_label
            fm[sensitive_feature]["statistics"][k] = cohort_data
    return fm
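
The grouped-metric object consumed above exposes by_group, difference(method="between_groups"), and ratio(), which matches fairlearn's MetricFrame API. The following is a minimal, hypothetical sketch of how get_fairlearn_grouped_metric could be backed by a MetricFrame; the helper name build_grouped_metric, the metric-name mapping, and the toy data are illustrative assumptions, not code from this repository.

# Minimal sketch (assumption): a grouped-metric helper built on fairlearn's
# MetricFrame, mirroring the attributes used by get_fairlearn_data above.
from fairlearn.metrics import MetricFrame
from sklearn.metrics import accuracy_score, recall_score

def build_grouped_metric(metric_name, y_true, y_pred, sensitive_values):
    # Hypothetical mapping from score-card metric names to metric callables.
    metric_fns = {
        "accuracy_score": accuracy_score,
        "recall_score": recall_score,
    }
    return MetricFrame(
        metrics=metric_fns[metric_name],
        y_true=y_true,
        y_pred=y_pred,
        sensitive_features=sensitive_values,
    )

# Example usage with toy binary-classification data:
gm = build_grouped_metric(
    "accuracy_score",
    y_true=[1, 0, 1, 1, 0, 1],
    y_pred=[1, 0, 0, 1, 0, 1],
    sensitive_values=["a", "a", "a", "b", "b", "b"],
)
print(gm.by_group.to_dict())                   # per-group metric values
print(gm.difference(method="between_groups"))  # largest gap between any two groups
print(gm.ratio())                              # smallest ratio between any two groups

For reference, the structure returned by get_fairlearn_data is roughly fm[feature]["metrics"][metric] = {"kind", "value", "group_metric", "group_max", "group_min"} plus fm[feature]["statistics"][value] = cohort_data, as assembled in the loops above.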