# scripts/adapet/ADAPET/src/eval/eval_model.py [0:0]
def dev_eval(config, model, batcher, num_batches, dict_avg_val=None):
    """Evaluate the model and append the resulting metrics to the score file.

    Optionally scores the train partition (``config.eval_train``) and the
    dev partition (``config.eval_dev``); all collected metrics are written
    as one JSON line appended to ``config.dev_score_file``.

    :param config: run configuration; reads ``eval_train``, ``eval_dev``,
        ``dataset`` and ``dev_score_file``.
    :param model: model under evaluation.
    :param batcher: batcher providing train/dev evaluation batches.
    :param num_batches: number of batches processed so far (logged as-is).
    :param dict_avg_val: optional dict of averaged metrics merged into the log.
    :return: tuple ``(dev score, dev logits)``; ``(0, None)`` when dev
        evaluation is disabled.
    """
    metrics = {"num_batches": num_batches}
    if dict_avg_val is not None:
        metrics.update(dict_avg_val)

    # Optionally compute the train-partition score.
    if config.eval_train:
        scorer = Scorer(config, config.dataset)
        eval(config, model, batcher.get_eval_train_batch(), scorer)
        _, train_metrics = scorer.get_score("train")
        metrics.update(train_metrics)

    # Compute the dev-partition score unless disabled; defaults otherwise.
    score_eval, dev_logits = 0, None
    if config.eval_dev:
        scorer = Scorer(config, config.dataset)
        eval(config, model, batcher.get_dev_batch(), scorer)
        score_eval, dev_metrics = scorer.get_score("dev")
        metrics.update(dev_metrics)
        dev_logits = scorer.get_logits()

    # Append one JSON line per evaluation to the running score file.
    with open(config.dev_score_file, 'a+') as f_out:
        f_out.write(json.dumps(metrics))
        f_out.write('\n')

    return score_eval, dev_logits