in prediction_generation/summarize_metrics.py
def process_oracle(method):
    """Compute per-dataset oracle (best observed) f1/precision/recall for `method`
    and store their averages on the corresponding MethodsMeasurements entry."""
    # datasets_metrics and MethodsMeasurements are module-level objects defined
    # elsewhere in summarize_metrics.py.
    metrics_dict = {'f1': [], 'recall': [], 'precision': []}
    stripped_method = method.replace("best_", "")
    for dataset_metrics in datasets_metrics:
        # Track the best score seen on this dataset across all unit methods.
        oracle_f1, oracle_precision, oracle_recall = -1, -1, -1
        for unit_method in dataset_metrics["results"][method]:
            if unit_method["status"] == "SUCCESS":
                scores = unit_method["scores"]
                oracle_f1 = max(oracle_f1, scores["f1"])
                oracle_precision = max(oracle_precision, scores["precision"])
                oracle_recall = max(oracle_recall, scores["recall"])
        # Only datasets with at least one successful run contribute to the averages.
        if oracle_f1 > -1:
            metrics_dict["f1"].append(oracle_f1)
        if oracle_precision > -1:
            metrics_dict["precision"].append(oracle_precision)
        if oracle_recall > -1:
            metrics_dict["recall"].append(oracle_recall)
    # Average the per-dataset oracle scores into the summary for this method.
    if len(metrics_dict["f1"]) > 0:
        MethodsMeasurements[stripped_method].f1_oracle = sum(metrics_dict["f1"]) / len(metrics_dict["f1"])
    if len(metrics_dict["precision"]) > 0:
        MethodsMeasurements[stripped_method].precision_oracle = sum(metrics_dict["precision"]) / len(metrics_dict["precision"])
    if len(metrics_dict["recall"]) > 0:
        MethodsMeasurements[stripped_method].recall_oracle = sum(metrics_dict["recall"]) / len(metrics_dict["recall"])
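
A minimal sketch of how process_oracle could be driven. The shapes of datasets_metrics and MethodsMeasurements are inferred from the accesses above, and SimpleNamespace stands in for whatever measurement class the module actually uses; the data values are hypothetical fixtures, not results from the project.

from types import SimpleNamespace

# Hypothetical fixtures mirroring the structures process_oracle reads and writes.
datasets_metrics = [
    {"results": {"best_demo": [
        {"status": "SUCCESS", "scores": {"f1": 0.6, "precision": 0.5, "recall": 0.7}},
        {"status": "SUCCESS", "scores": {"f1": 0.8, "precision": 0.4, "recall": 0.9}},
        {"status": "FAILURE"},  # skipped: only SUCCESS runs are considered
    ]}},
]
MethodsMeasurements = {
    "demo": SimpleNamespace(f1_oracle=None, precision_oracle=None, recall_oracle=None),
}

process_oracle("best_demo")
# The oracle takes the per-dataset maximum of each metric independently, so here:
# f1_oracle == 0.8, precision_oracle == 0.5, recall_oracle == 0.9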