def process_best(method)

in prediction_postprocessing_scripts/handpick_best.py

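process_best aggregates the scores of a "best_"-prefixed method across all datasets: it averages f1/precision/recall per hyperparameter configuration, records the best averages on the corresponding MethodsMeasurements entry, and collects the task files of the best-F1 configuration into best_paths. It reads and mutates module-level state defined elsewhere in handpick_best.py: json (imported), datasets_metrics, nb_datasets_threshold, MethodsMeasurements, and best_paths.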

def process_best(method):
    # Group the scores of every successful run by hyperparameter
    # configuration, accumulating per-dataset f1/precision/recall lists.
    hyperparams = {}
    stripped_method = method.replace("best_", "")
    for dataset_metrics in datasets_metrics:
        for unit_method in dataset_metrics["results"][method]:
            conf = unit_method["args"]
            # Serialize the config deterministically so it can serve as a dict key.
            conf_str = json.dumps(conf, sort_keys=True)
            if unit_method["status"] == "SUCCESS":
                metrics = unit_method["scores"]
                entry = hyperparams.setdefault(
                    conf_str, {"f1": [], "precision": [], "recall": []}
                )
                entry["f1"].append(metrics["f1"])
                entry["precision"].append(metrics["precision"])
                entry["recall"].append(metrics["recall"])

    # Average each metric per configuration, keeping only configurations that
    # succeeded on more than nb_datasets_threshold datasets.
    def mean_over_datasets(metric):
        return {
            key: sum(value[metric]) / len(value[metric])
            for key, value in hyperparams.items()
            if len(value[metric]) > nb_datasets_threshold
        }

    dict_f1 = mean_over_datasets("f1")
    dict_precision = mean_over_datasets("precision")
    dict_recall = mean_over_datasets("recall")

    # Pick the configuration with the highest mean F1, then look up its mean
    # precision and recall; also take the best mean precision and recall
    # overall. Everything is None when no configuration passed the threshold.
    best_f1_conf = max(dict_f1, key=dict_f1.get) if dict_f1 else None
    max_f1 = dict_f1.get(best_f1_conf)
    precision_max_f1 = dict_precision.get(best_f1_conf)
    recall_max_f1 = dict_recall.get(best_f1_conf)
    max_precision = max(dict_precision.values()) if dict_precision else None
    max_recall = max(dict_recall.values()) if dict_recall else None
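    # Publish the aggregated bests on the method's measurements record.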
    MethodsMeasurements[stripped_method].f1_best = max_f1
    MethodsMeasurements[stripped_method].precision_best = max_precision
    MethodsMeasurements[stripped_method].recall_best = max_recall
    MethodsMeasurements[stripped_method].precision_f1_best = precision_max_f1
    MethodsMeasurements[stripped_method].recall_f1_best = recall_max_f1
    if max_f1 is not None:
        # Remember the task files of every run that used the best-F1
        # configuration so their predictions can be handpicked later.
        for dataset_metrics in datasets_metrics:
            signature_id = dataset_metrics["dataset"]
            best_conf_file_names = [
                conf["task_file"]
                for conf in dataset_metrics["results"][method]
                if json.dumps(conf["args"], sort_keys=True) == best_f1_conf
            ]
            for file_name in best_conf_file_names:
                best_paths.append(signature_id + "/" + method + "/" + file_name)
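
A minimal sketch of how process_best might be exercised end to end. The global names (datasets_metrics, nb_datasets_threshold, MethodsMeasurements, best_paths) come from the function body above; the stub values, the "FAILURE" status string, and the SimpleNamespace stand-in for a measurements record are assumptions for illustration only:

import json
from types import SimpleNamespace

# Hypothetical stand-ins for the module-level state the function expects.
nb_datasets_threshold = 0
best_paths = []
MethodsMeasurements = {"foo": SimpleNamespace()}
datasets_metrics = [
    {
        "dataset": "dataset_a",
        "results": {
            "best_foo": [
                {
                    "args": {"lr": 0.1},
                    "task_file": "run_001.json",
                    "status": "SUCCESS",
                    "scores": {"f1": 0.8, "precision": 0.75, "recall": 0.85},
                },
                {
                    # Any status other than "SUCCESS" is ignored;
                    # "FAILURE" is a made-up example value.
                    "args": {"lr": 0.5},
                    "task_file": "run_002.json",
                    "status": "FAILURE",
                    "scores": {},
                },
            ]
        },
    },
]

process_best("best_foo")
print(MethodsMeasurements["foo"].f1_best)  # 0.8
print(best_paths)                          # ['dataset_a/best_foo/run_001.json']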