in main.py
def evaluate_trainer(trainer, args, datasets, is_trnsf, name, **kwargs):
"""Evaluate trainers on the train and test dataset."""
logger.info(f"Evaluating {name} ...")
if is_trnsf:
evaluator = eval_trnsf
else:
evaluator = eval_corr_gen if args.is_correlation else eval_clf
chckpnt_paths = get_chckpnt_paths(args, name)
tensorboard_dir = get_tensorboard_paths(args, name)
trainers = {"best": trainer}
is_append = False
for epoch, trainer in trainers.items():
# Test Evaluation
eval_trainer_log(
trainer,
datasets["test"],
args.csv_score_pattern,
chckpnt_paths,
dict(args.hyperparameters),
evaluator=evaluator,
tensorboard_dir=tensorboard_dir,
epoch=epoch,
is_append=is_append,
**kwargs,
)
# only create the field for the first time you log
is_append = True
# evaluation should be made on the training without any addition (e.g. anti generalization)
train_data = datasets.get("train_unmodified", datasets["train"])
# Train Evaluation
eval_trainer_log(
trainer,
train_data,
args.csv_score_pattern,
chckpnt_paths,
dict(args.hyperparameters),
evaluator=evaluator,
tensorboard_dir=None,
is_append=is_append,
mode="train",
file_clf_rep="train_" + FILE_CLF_REP,
epoch=epoch,
**kwargs,
)
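

# --- Usage sketch (illustrative only) ------------------------------------
# A minimal sketch of how evaluate_trainer might be called after training.
# `build_args`, `load_datasets`, and `fit_trainer` are hypothetical helpers,
# not functions from this file; only evaluate_trainer and the attributes it
# reads (args.is_correlation, args.csv_score_pattern, args.hyperparameters,
# datasets["train"], datasets["test"]) come from the code above.
#
#     args = build_args()                    # hypothetical: parse CLI flags
#     datasets = load_datasets(args)         # hypothetical: {"train": ..., "test": ...}
#     trainer = fit_trainer(args, datasets)  # hypothetical: train the model
#     # Evaluate a (non-transformer) classifier, logging test then train scores:
#     evaluate_trainer(trainer, args, datasets, is_trnsf=False, name="clf")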