in src/responsibleai/rai_analyse/create_score_card.py [0:0]
def _load_pdf_config(args, insight_data):
    """Load the PDF-generation config, merge optional cohort definitions,
    validate it, and normalize metric thresholds in place.

    Returns the validated config dict.
    """
    with open(args.pdf_generation_config, "r") as json_file:
        config = json.load(json_file)

    if args.predefined_cohorts_json:
        with open(args.predefined_cohorts_json, "r") as json_file:
            cohorts_definition = json.load(json_file)
        # Index cohort filter lists by cohort name for downstream lookup.
        config["cohorts_definition"] = {
            c["name"]: c["cohort_filter_list"] for c in cohorts_definition
        }

    config = validate_and_correct_config(config, insight_data)

    # Parse each metric's "threshold" string into a (type, argument) tuple;
    # drop thresholds that cannot be parsed rather than failing later.
    for metric_name, metric_config in config["Metrics"].items():
        if "threshold" in metric_config:
            threshold_type, threshold_arg = parse_threshold(
                metric_config["threshold"]
            )
            if threshold_type and threshold_arg:
                metric_config["threshold"] = (threshold_type, threshold_arg)
            else:
                metric_config.pop("threshold")

    return config


def _attach_run_info(config, run, dashboard_info):
    """Populate config["runinfo"] with submitter, start time, and a deep link
    to the RAI dashboard for the current (non-local) AzureML run.
    """
    run_details = run.get_details()
    ws = run.experiment.workspace
    wsid = (
        f"/subscriptions/{ws.subscription_id}/resourceGroups/{ws.resource_group}/"
        f"providers/Microsoft.MachineLearningServices/workspaces/{ws.name}"
    )
    dashboard_link = "https://ml.azure.com/model/analysis/{}/{}/?wsid={}".format(
        dashboard_info[DashboardInfo.RAI_INSIGHTS_MODEL_ID_KEY],
        dashboard_info[DashboardInfo.RAI_INSIGHTS_GATHER_RUN_ID_KEY],
        wsid,
    )

    if "startTimeUtc" not in run_details:
        # Fall back to the current UTC time when run details do not provide it.
        # datetime.utcnow() is deprecated (3.12+); use an aware UTC datetime,
        # which formats to the identical string.
        from datetime import timezone

        startTimeUtc = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    else:
        startTimeUtc = run_details["startTimeUtc"]

    config["runinfo"] = {
        "submittedBy": run_details["submittedBy"],
        "startTimeUtc": startTimeUtc,
        "dashboard_link": dashboard_link,
        "model_id": dashboard_info[DashboardInfo.RAI_INSIGHTS_MODEL_ID_KEY],
        "dashboard_title": dashboard_info[
            DashboardInfo.RAI_INSIGHTS_DASHBOARD_TITLE_KEY
        ],
    }


def main(args):
    """Generate a score card PDF for a computed RAI insights dashboard.

    Loads the dashboard info and PDF config, attaches AzureML run metadata
    (unless running locally), dispatches to the regression or classification
    workflow, renders the PDF, and uploads it to the gather run.

    Raises:
        ValueError: if config["Model"]["ModelType"] is not a supported type.
    """
    dashboard_info = load_dashboard_info_file(args.rai_insights_dashboard)
    _logger.info("Constructor info: {0}".format(dashboard_info))

    insight_data = RaiInsightData(args.rai_insights_dashboard)
    config = _load_pdf_config(args, insight_data)

    if not args.local:
        run = Run.get_context()
        _attach_run_info(config, run, dashboard_info)

    # Hoist the lowercased model type instead of recomputing it per branch.
    model_type = config["Model"]["ModelType"].lower()
    if model_type == "regression":
        wf = Workflow(insight_data, config, args, RegressionComponents)
    elif model_type in ("classification", "multiclass"):
        wf = Workflow(insight_data, config, args, ClassificationComponents)
    else:
        raise ValueError(
            "Model type {} cannot be matched to a score card generation workflow".format(
                config["Model"]["ModelType"]
            )
        )

    wf.generate_pdf()

    if not args.local:
        add_properties_to_gather_run(
            dashboard_info, {"ScoreCardTitle": config["Model"]["ModelName"]}
        )
        run.upload_folder("scorecard", args.pdf_output_path)