# assets/responsibleai/tabular/components/src/_score_card/regression_components.py
from statistics import mean  # assumed source of ``mean`` used for per-cohort averages below

# The HTML element helpers (div, p, h2, h3, ul, li, img, table, thead, tbody, tr,
# th, td) and the shared component module ``cc`` (box plot and page divider helpers)
# are assumed to be imported at the top of this module.


def get_fairlearn_page(data):
    """Render the fairness assessment section of the regression score card as an HTML string."""
    heading = div(
        p(
            "Understand your model's fairness issues "
            "using group-fairness metrics across sensitive features and cohorts. "
            "Pay particular attention to the cohorts who receive worse treatments "
            "(predictions) by your model."
        ),
        _class="left",
    )
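    # Each sensitive feature contributes a "left" block (cohort legend plus
    # per-metric text summaries) and a "main" block (box plot plus cohort table);
    # the two are paired into side-by-side containers at the end of this function.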
    left_containers = []
    main_containers = []

    for f in data:
        section = [h2('Feature "{}"'.format(f))]
        feature_list = ul()
        for i in data[f]["statistics"]:
            feature_list.append(
                li("{}: {}".format(data[f]["statistics"][i]["short_label"], i))
            )
        section.append(h3("Legends:"))
        section.append(feature_list)
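        # Summarise each group-fairness metric: name the cohorts with the highest
        # and lowest values, then report the derived disparity (difference or ratio).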
        for metric_key, metric_details in data[f]["metrics"].items():
            section.append(h3("{}:".format(metric_key)))
            section.append(
                p(
                    '"{}" has the highest {}: {}'.format(
                        metric_details["group_max"][0],
                        metric_key,
                        round(metric_details["group_max"][1], 2),
                    )
                )
            )
            section.append(
                p(
                    '"{}" has the lowest {}: {}'.format(
                        metric_details["group_min"][0],
                        metric_key,
                        round(metric_details["group_min"][1], 2),
                    )
                )
            )
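            # "difference" metrics summarise disparity as max - min across cohorts and
            # "ratio" metrics as min / max; 0 (difference) or 1 (ratio) would mean parity.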
            if metric_details["kind"] == "difference":
                section.append(
                    p(
                        "⇒ Maximum difference in {} is {}".format(
                            metric_key,
                            round(
                                metric_details["group_max"][1]
                                - metric_details["group_min"][1],
                                2,
                            ),
                        )
                    )
                )
            elif metric_details["kind"] == "ratio":
                section.append(
                    p(
                        "⇒ Minimum ratio of {} is {}".format(
                            metric_key,
                            round(
                                metric_details["group_min"][1]
                                / metric_details["group_max"][1],
                                2,
                            ),
                        )
                    )
                )
        left_containers.append(div(section, _class="nobreak_div"))
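
    # Nested helpers below render the "main" column: a box plot of predictions per
    # cohort and a table of per-cohort statistics and metric values.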
    def get_fairness_box_plot(data):
        box_plot_data = {"data": []}
        for c in data:
            box_plot_data["data"].append(
                {
                    "label": data[c]["short_label"]
                    + "<br>"
                    + str(int(100 * data[c]["population"]))
                    + "% n",
                    "datapoints": data[c]["y_pred"],
                }
            )
        box_plot_data["data"] = list(reversed(box_plot_data["data"]))
        png_base64 = cc.get_box_plot(box_plot_data)
        return div(
            img(_src="data:image/png;base64,{}".format(png_base64)),
            _class="image_div",
        )

    def get_table_row(heading, data):
        table_row_elems = []
        table_row_elems.append(th(heading, _class="header_cell"))
        for v in data:
            table_row_elems.append(td(v, _class="cell"))
        return tr(table_row_elems, _class="row")
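
    # The cohort table has one row per cohort and one column for the average
    # prediction, the average ground truth, and each group-fairness metric.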
    def get_table(data):
        metric_list = list(data["metrics"])
        horizontal_headings = [
            "Average<br>Prediction",
            "Average<br>Groundtruth",
        ] + [d.replace("_", " ").title().replace(" ", "<br>") for d in metric_list]
        vertical_headings = list(data["statistics"].keys())
        headings_td = [td(_class="header_cell")] + [
            td(x, _class="header_cell") for x in horizontal_headings
        ]
        headings = thead(tr(headings_td, _class="row"), _class="table-head")
        rows_elems = []
        for vh in vertical_headings:
            row_data = [
                round(mean(data["statistics"][vh]["y_pred"]), 2),
                round(mean(data["statistics"][vh]["y_test"]), 2),
            ]
            for m in metric_list:
                row_data.append(round(data["metrics"][m]["group_metric"][vh], 2))
            rows_elems.append(get_table_row(vh, row_data))
        body = tbody(rows_elems, _class="table-body")
        return table(headings, body, _class="table")

    # Prediction distribution and cohort analysis per sensitive feature.
    for f in data:
        distribution = div(
            h2('Feature "{}"'.format(f)),
            h3("Prediction distribution chart"),
            get_fairness_box_plot(data[f]["statistics"]),
            _class="nobreak_div",
        )
        ctable = div(
            h3("Analysis across cohorts"),
            get_table(data[f]),
            _class="nobreak_div",
        )
        main_containers.append(str(distribution) + str(ctable))
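
    # Pair each feature's text summary (left) with its chart and table (main) into
    # a single printable container.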
    containers = []
    for i in range(len(left_containers)):
        containers.append(
            str(
                div(
                    div(left_containers[i], _class="left"),
                    div(main_containers[i], _class="main"),
                    _class="container",
                )
            )
        )
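
    # The page opens with a section divider and the introductory text, followed by
    # one container per sensitive feature.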
    return str(
        div(
            cc.get_page_divider("Fairness Assessment"),
            heading,
            _class="container nobreak_div",
        )
    ) + "".join(containers)