in ubteacher/engine/hooks.py [0:0]
def _do_loss_eval(self):
    """Run the model over the validation loader, average the per-batch loss
    dict, and write the averaged values to the trainer's EventStorage."""
    record_acc_dict = {}
    with inference_context(self._model), torch.no_grad():
        for _, inputs in enumerate(self._data_loader):
            record_dict = self._get_loss(inputs, self._model)
            # accumulate the losses across batches
            for loss_type in record_dict.keys():
                if loss_type not in record_acc_dict.keys():
                    record_acc_dict[loss_type] = record_dict[loss_type]
                else:
                    record_acc_dict[loss_type] += record_dict[loss_type]
        # average over the number of batches
        for loss_type in record_acc_dict.keys():
            record_acc_dict[loss_type] = record_acc_dict[loss_type] / len(
                self._data_loader
            )
        # separate the losses (keys prefixed with "loss") from other metrics
        loss_acc_dict = {}
        for key in record_acc_dict.keys():
            if key[:4] == "loss":
                loss_acc_dict[key] = record_acc_dict[key]
        # only log results on the main process
        if comm.is_main_process():
            total_losses_reduced = sum(loss for loss in loss_acc_dict.values())
            self.trainer.storage.put_scalar(
                "val_total_loss_val" + self._model_name, total_losses_reduced
            )
            record_acc_dict = {
                "val_" + k + self._model_name: record_acc_dict[k]
                for k in record_acc_dict.keys()
            }
            if len(record_acc_dict) > 1:
                self.trainer.storage.put_scalars(**record_acc_dict)
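For reference, below is a self-contained sketch of the same accumulate / average / filter steps, using hypothetical per-batch dictionaries in place of the real self._get_loss output; the key names and values are illustrative only and not part of the hook.

# Illustrative stand-ins for the per-batch dicts returned by self._get_loss.
batches = [
    {"loss_cls": 1.0, "loss_box_reg": 0.5, "fg_ratio": 0.2},
    {"loss_cls": 0.5, "loss_box_reg": 0.25, "fg_ratio": 0.3},
]

# accumulate the values across batches
record_acc_dict = {}
for record_dict in batches:
    for loss_type, value in record_dict.items():
        record_acc_dict[loss_type] = record_acc_dict.get(loss_type, 0.0) + value

# average over the number of batches
record_acc_dict = {k: v / len(batches) for k, v in record_acc_dict.items()}

# keep only the loss entries, mirroring the key[:4] == "loss" check above
loss_acc_dict = {k: v for k, v in record_acc_dict.items() if k.startswith("loss")}

total_losses = sum(loss_acc_dict.values())
print(loss_acc_dict)   # {'loss_cls': 0.75, 'loss_box_reg': 0.375}
print(total_losses)    # 1.125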