in src/responsibleai/rai_analyse/_score_card/common_components.py [0:0]
def get_model_overview(data):
    # Left panel: model purpose, task type, and test-set size.
    model_left_items = []
    if "ModelSummary" not in data:
        raise UserConfigValidationException(
            "Invalid model config data, expecting key ModelSummary to exist in the model config data."
        )
    else:
        model_left_items.append(div(h3("Purpose"), p(data["ModelSummary"])))
    if data["ModelType"] == "binary_classification":
        # Binary classification: name the two classes being compared.
        model_left_items.append(
            div(
                p(
                    "Classification: {} vs {}".format(
                        data["classes"][0], data["classes"][1]
                    )
                )
            )
        )
    else:
        model_left_items.append(
            div(p("This is a {} model.".format(data["ModelType"].lower())))
        )
    model_left_items.append(
        div(
            h3("Model evaluation"),
            p(
                "This model is evaluated on a test set with {} datapoints.".format(
                    len(data["y_test"])
                )
            ),
        )
    )
    model_overview_left_container = div(model_left_items, _class="left_model_overview")

    # Main panel: user-defined metric targets rendered as a bordered bullet list.
    model_main_items = []
    model_main_items.extend(
        [
            h3("Target values"),
            p(
                "Here are your defined target values for your model "
                "performance and/or other model assessment parameters:"
            ),
        ]
    )
    metric_targets_elems = []
    for item in data["metrics_targets"]:
        metric_targets_elems.append(li(item))
    model_main_items.append(
        div(
            ul(metric_targets_elems),
            _style="border: 2px solid black; border-radius: 5px;",
        )
    )
    model_overview_main_container = div(model_main_items, _class="main_model_overview")

    # Header: model name plus, when run info is available, the submitter, start
    # time, a link back to the source RAI dashboard, and the model id.
    heading = [h1(data["ModelName"])]
    if data["runinfo"]:
        heading.append(
            p(
                "Generated by {} on {}".format(
                    data["runinfo"]["submittedBy"], data["runinfo"]["startTimeUtc"]
                )
            )
        )
        heading.append(
            p(
                "Source RAI dashboard: ",
                a(
                    data["runinfo"]["dashboard_title"],
                    _href=data["runinfo"]["dashboard_link"],
                ),
            )
        )
        heading.append(p(f"Model id: {data['runinfo']['model_id']}"))
    model_overview_container = div(
        div(heading, _class="header"),
        get_page_divider("Model Summary"),
        model_overview_left_container,
        model_overview_main_container,
        _class="container",
    )
    return model_overview_container
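
# Usage sketch (illustrative only): the shape of `data` below is inferred from
# the keys this function reads; the import path and all placeholder values are
# assumptions, not part of the module.
# from responsibleai.rai_analyse._score_card.common_components import get_model_overview
sample_data = {
    "ModelName": "loan-approval-model",
    "ModelSummary": "Predicts whether a loan application should be approved.",
    "ModelType": "binary_classification",
    "classes": ["approved", "rejected"],
    "y_test": [0, 1, 1, 0, 1],
    "metrics_targets": ["Accuracy >= 0.85", "False negative rate <= 0.10"],
    "runinfo": {
        "submittedBy": "alice@example.com",
        "startTimeUtc": "2024-01-01T00:00:00Z",
        "dashboard_title": "Loan approval RAI dashboard",
        "dashboard_link": "https://ml.azure.com/",
        "model_id": "loan-approval-model:1",
    },
}
# overview = get_model_overview(sample_data)
# `overview` is a div element built from the module's HTML helpers (div, p, h1,
# ...); how it is rendered to an HTML string depends on that helper library.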