in lmms_eval/tasks/mathvista/mathvista_evals.py [0:0]
def normalize_extracted_answer(self, extraction, choices, question_type, answer_type, precision):
    """
    Normalize the extracted answer to match the answer type.

    Args:
        extraction: The raw extracted answer (usually a string, but any type is tolerated).
        choices: List of answer option texts, used when question_type is "multi_choice".
        question_type: "multi_choice" triggers option-letter matching; anything else
            falls through to the answer_type branches.
        answer_type: One of "integer", "float", or "list" (ignored for multi-choice).
        precision: Number of decimal places to round to for "float" answers.

    Returns:
        The normalized answer as a string, or None when a numeric conversion fails.
    """
    if question_type == "multi_choice":
        # make sure the extraction is a string
        if isinstance(extraction, str):
            extraction = extraction.strip()
        else:
            try:
                extraction = str(extraction)
            except (ValueError, TypeError):
                extraction = ""
        # extract "A" from "(A) text"
        letter = re.findall(r"\(([a-zA-Z])\)", extraction)
        if len(letter) > 0:
            extraction = letter[0].upper()
        # candidate option letters: "A", "B", ... one per choice
        options = [chr(ord("A") + i) for i in range(len(choices))]
        if extraction in options:
            # convert option letter to text, e.g. "A" -> "text"
            ind = options.index(extraction)
            extraction = choices[ind]
        else:
            # select the most similar option
            extraction = self.get_most_similar(extraction, choices)
        # NOTE(review): assert is stripped under `python -O`; kept for
        # backward compatibility since callers may rely on AssertionError.
        assert extraction in choices
    elif answer_type == "integer":
        try:
            # go through float first so "5.0" (and "5.7", truncated) parse
            extraction = str(int(float(extraction)))
        except (ValueError, TypeError):
            extraction = None
    elif answer_type == "float":
        try:
            extraction = str(round(float(extraction), precision))
        except (ValueError, TypeError):
            extraction = None
    elif answer_type == "list":
        try:
            extraction = str(extraction)
        except (ValueError, TypeError):
            extraction = None
    return extraction