in src/responsibleai/rai_analyse/_score_card/classification_components.py [0:0]
def get_model_performance_page(data):
    """Build the HTML for the "Model Performance" score-card page.

    Renders a two-column container: a left column with explanatory text for
    each configured metric, and a main column holding the confusion-matrix
    grid.

    :param data: score-card data dict; this function reads ``data["classes"]``,
        ``data["y_test"]``, ``data["y_pred"]`` and ``data["metrics"]``.
        Assumes a binary-classification setup (exactly two entries in
        ``data["classes"]``) — TODO confirm with callers.
    :return: the rendered page section as an HTML string.
    """
    left_metric_elems = [p("Observe evidence of your model performance here:")]

    # NOTE(review): this nested helper is not invoked anywhere in this
    # function body — confirm whether it is dead code before removing.
    def get_metric_bar_plot(mname, data):
        """Render a per-class bar plot for metric *mname* as an <img> div."""
        # Boolean masks selecting samples whose true label is class 0 / class 1.
        y_0_filtermap = [i == data["classes"][0] for i in data["y_test"]]
        y_1_filtermap = [i == data["classes"][1] for i in data["y_test"]]
        # Compute the metric restricted to each actual-class subset,
        # treating that class as the positive label.
        class_0_metric = get_metric(
            mname,
            data["y_test"][y_0_filtermap],
            data["y_pred"][y_0_filtermap],
            pos_label=data["classes"][0],
            labels=data["classes"],
        )
        class_1_metric = get_metric(
            mname,
            data["y_test"][y_1_filtermap],
            data["y_pred"][y_1_filtermap],
            pos_label=data["classes"][1],
            labels=data["classes"],
        )
        # Fixed typo: "Acutal" -> "Actual".
        y_data = [
            'Actual "{}"'.format(data["classes"][0]),
            'Actual "{}"'.format(data["classes"][1]),
        ]
        # Express each metric as an integer percentage, paired with its
        # complement so the stacked bars always sum to 100%.
        x_data = [int(class_0_metric * 100), int(class_1_metric * 100)]
        x_data = [[x, 100 - x] for x in x_data]
        # BUGFIX: was ``legend = [m]`` — ``m`` is the enclosing loop variable
        # (late-binding closure / NameError if called outside the loop);
        # the metric name parameter ``mname`` is what belongs in the legend.
        legend = [mname]
        tickvals = [0, 25, 50, 75, 100]
        ticktext = [str(x) + "%" for x in tickvals]
        png_base64 = cc.get_bar_plot(
            y_data,
            x_data,
            legend=legend,
            tickvals=tickvals,
            ticktext=ticktext,
            tickappend="%",
        )
        return div(
            img(_src="data:image/png;base64,{}".format(png_base64)),
            _class="image_div",
        )

    main_elems = [_get_confusion_matrix_grid(data)]
    # One explanatory text element per configured metric.
    for m in data["metrics"]:
        left_metric_elems.append(_get_model_performance_explanation_text(m, data))
    left_container = div(left_metric_elems, _class="left")
    main_container = div(main_elems, _class="main")
    return str(
        div(
            cc.get_page_divider("Model Performance"),
            left_container,
            main_container,
            _class="container",
        )
    )