prediction_generation/old-code/summarize_metrics_new_debug.py [232:277]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        max_recall = None
    MethodsMeasurements[stripped_method].f1_best = max_f1
    MethodsMeasurements[stripped_method].precision_best = max_precision
    MethodsMeasurements[stripped_method].recall_best = max_recall
    MethodsMeasurements[stripped_method].precision_f1_best = precision_max_f1
    MethodsMeasurements[stripped_method].recall_f1_best = recall_max_f1


def process_oracle(method):
    # Oracle aggregation: for each dataset, take the best f1, precision, and
    # recall reached by any successful unit method (each metric is maximized
    # independently, so the three maxima may come from different unit methods),
    # then average those per-dataset maxima across datasets.
    metrics_dict = {'f1': [], 'recall': [], 'precision': []}
    stripped_method = method.replace("best_", "")
    for dataset_metrics in datasets_metrics:
        # -1 sentinels mean "no successful run seen for this dataset yet".
        oracle_f1, oracle_precision, oracle_recall = -1, -1, -1
        for unit_method in dataset_metrics["results"][method]:
            if unit_method["status"] == "SUCCESS":
                metrics = unit_method["scores"]
                f1 = metrics["f1"]
                precision = metrics["precision"]
                recall = metrics["recall"]
                if f1 > oracle_f1:
                    oracle_f1 = f1
                if precision > oracle_precision:
                    oracle_precision = precision
                if recall > oracle_recall:
                    oracle_recall = recall
        # Only datasets with at least one successful run contribute to the mean.
        if oracle_f1 > -1:
            metrics_dict["f1"].append(oracle_f1)
        if oracle_precision > -1:
            metrics_dict["precision"].append(oracle_precision)
        if oracle_recall > -1:
            metrics_dict["recall"].append(oracle_recall)
    if len(metrics_dict["f1"]) > 0:
        MethodsMeasurements[stripped_method].f1_oracle = sum(metrics_dict["f1"]) / len(metrics_dict["f1"])
    if len(metrics_dict["precision"]) > 0:
        MethodsMeasurements[stripped_method].precision_oracle = sum(metrics_dict["precision"]) / len(metrics_dict["precision"])
    if len(metrics_dict["recall"]) > 0:
        MethodsMeasurements[stripped_method].recall_oracle = sum(metrics_dict["recall"]) / len(metrics_dict["recall"])


# Default-config methods go through the plain averaging path; "best_" methods
# additionally get oracle and best-single-configuration aggregates.
for method in default_methods:
    process_default(method)
for method in best_methods:
    process_oracle(method)
    process_best(method)

# Flatten the measurement objects into plain dicts for serialization.
data = {key: vars(value) for key, value in MethodsMeasurements.items()}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
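
A minimal, self-contained illustration of what process_oracle computes, assuming
the definition above is in scope. MethodsMeasurements, datasets_metrics, the
method name "best_methodA", and the toy scores are all hypothetical stand-ins
inferred from the excerpt; the snippets never show how these structures are
actually built.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from types import SimpleNamespace

# Hypothetical stand-ins for the globals the excerpt relies on.
MethodsMeasurements = {"methodA": SimpleNamespace()}
datasets_metrics = [
    {"results": {"best_methodA": [
        {"status": "SUCCESS", "scores": {"f1": 0.6, "precision": 0.5, "recall": 0.8}},
        {"status": "SUCCESS", "scores": {"f1": 0.7, "precision": 0.9, "recall": 0.4}},
    ]}},
    {"results": {"best_methodA": [
        {"status": "FAILURE"},  # skipped: only SUCCESS runs count
        {"status": "SUCCESS", "scores": {"f1": 0.5, "precision": 0.5, "recall": 0.5}},
    ]}},
]

process_oracle("best_methodA")
m = MethodsMeasurements["methodA"]
# Dataset 1 maxima: f1=0.7, precision=0.9, recall=0.8 (each metric maximized
# independently); dataset 2 maxima: 0.5 for all three.
print(m.f1_oracle, m.precision_oracle, m.recall_oracle)  # 0.6 0.7 0.65
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -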



prediction_generation/old-code/summarize_metrics_old.py [139:184]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        max_recall = None
    MethodsMeasurements[stripped_method].f1_best = max_f1
    MethodsMeasurements[stripped_method].precision_best = max_precision
    MethodsMeasurements[stripped_method].recall_best = max_recall
    MethodsMeasurements[stripped_method].precision_f1_best = precision_max_f1
    MethodsMeasurements[stripped_method].recall_f1_best = recall_max_f1


def process_oracle(method):
    # Oracle aggregation: for each dataset, take the best f1, precision, and
    # recall reached by any successful unit method (each metric is maximized
    # independently, so the three maxima may come from different unit methods),
    # then average those per-dataset maxima across datasets.
    metrics_dict = {'f1': [], 'recall': [], 'precision': []}
    stripped_method = method.replace("best_", "")
    for dataset_metrics in datasets_metrics:
        # -1 sentinels mean "no successful run seen for this dataset yet".
        oracle_f1, oracle_precision, oracle_recall = -1, -1, -1
        for unit_method in dataset_metrics["results"][method]:
            if unit_method["status"] == "SUCCESS":
                metrics = unit_method["scores"]
                f1 = metrics["f1"]
                precision = metrics["precision"]
                recall = metrics["recall"]
                if f1 > oracle_f1:
                    oracle_f1 = f1
                if precision > oracle_precision:
                    oracle_precision = precision
                if recall > oracle_recall:
                    oracle_recall = recall
        # Only datasets with at least one successful run contribute to the mean.
        if oracle_f1 > -1:
            metrics_dict["f1"].append(oracle_f1)
        if oracle_precision > -1:
            metrics_dict["precision"].append(oracle_precision)
        if oracle_recall > -1:
            metrics_dict["recall"].append(oracle_recall)
    if len(metrics_dict["f1"]) > 0:
        MethodsMeasurements[stripped_method].f1_oracle = sum(metrics_dict["f1"]) / len(metrics_dict["f1"])
    if len(metrics_dict["precision"]) > 0:
        MethodsMeasurements[stripped_method].precision_oracle = sum(metrics_dict["precision"]) / len(metrics_dict["precision"])
    if len(metrics_dict["recall"]) > 0:
        MethodsMeasurements[stripped_method].recall_oracle = sum(metrics_dict["recall"]) / len(metrics_dict["recall"])


# Default-config methods go through the plain averaging path; "best_" methods
# additionally get oracle and best-single-configuration aggregates.
for method in default_methods:
    process_default(method)
for method in best_methods:
    process_oracle(method)
    process_best(method)

# Flatten the measurement objects into plain dicts for serialization.
data = {key: vars(value) for key, value in MethodsMeasurements.items()}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
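
For orientation, a hypothetical reconstruction of one entry in
MethodsMeasurements, inferred purely from the attributes these excerpts assign.
The real class, its defaults, and any fields set by process_default or the
truncated part of process_best are not visible in the snippets above.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from dataclasses import dataclass
from typing import Optional

@dataclass
class MethodMeasurement:  # hypothetical name; the actual class is not shown
    # Set by process_best (its body is truncated in the excerpts above):
    f1_best: Optional[float] = None
    precision_best: Optional[float] = None
    recall_best: Optional[float] = None
    precision_f1_best: Optional[float] = None  # precision/recall at the f1-best point,
    recall_f1_best: Optional[float] = None     # judging by the *_max_f1 variable names
    # Set by process_oracle: means of the per-dataset maxima.
    f1_oracle: Optional[float] = None
    precision_oracle: Optional[float] = None
    recall_oracle: Optional[float] = None

# vars() on such an instance returns its __dict__, which is what the final
# `data = {key: vars(value) ...}` line relies on to build serializable dicts.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -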



