metrics/super_glue/super_glue.py
def _compute(self, predictions, references):
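    # Each SuperGLUE task has its own scoring scheme; dispatch on the configuration name.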
if self.config_name == "axb":
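        # AX-b is a diagnostic entailment set scored with Matthews correlation
        # (sklearn.metrics.matthews_corrcoef takes y_true first, y_pred second).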
return {"matthews_correlation": matthews_corrcoef(references, predictions)}
elif self.config_name == "cb":
return acc_and_f1(predictions, references, f1_avg="macro")
elif self.config_name == "record":
dataset = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
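        # Map each query id to its predicted answer text.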
predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
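        # evaluate_record returns (metrics, correct_ids); keep only the metrics dict.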
return evaluate_record(dataset, predictions)[0]
elif self.config_name == "multirc":
return evaluate_multirc(predictions, references)
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
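        # The remaining tasks are classification problems scored with simple accuracy.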
return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
)
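
# Usage sketch for the metric above, assuming the legacy datasets.load_metric
# API that loads this script by name (newer code uses the standalone
# `evaluate` library instead); the predictions and references are made up.
from datasets import load_metric

metric = load_metric("super_glue", "cb")
# CB is three-way classification (entailment / contradiction / neutral),
# so predictions and references are integer class ids.
results = metric.compute(predictions=[0, 1, 1, 0], references=[0, 1, 2, 0])
print(results)  # {'accuracy': 0.75, 'f1': ...}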