in bring-your-own-container/fairseq_translation/fairseq/train_driver.py
import collections  # needed at module level for OrderedDict


def get_training_stats(trainer):
    """Collect training metrics from the trainer's meters into an ordered dict."""
    stats = collections.OrderedDict()
    stats["loss"] = "{:.3f}".format(trainer.get_meter("train_loss").avg)
    if trainer.get_meter("train_nll_loss").count > 0:
        nll_loss = trainer.get_meter("train_nll_loss").avg
        stats["nll_loss"] = "{:.3f}".format(nll_loss)
    else:
        # Criterion did not report a separate nll_loss; fall back to the training loss.
        nll_loss = trainer.get_meter("train_loss").avg
    stats["ppl"] = get_perplexity(nll_loss)
    stats["wps"] = round(trainer.get_meter("wps").avg)            # words per second
    stats["ups"] = "{:.1f}".format(trainer.get_meter("ups").avg)  # updates per second
    stats["wpb"] = round(trainer.get_meter("wpb").avg)            # words per batch
    stats["bsz"] = round(trainer.get_meter("bsz").avg)            # sentences per batch
    stats["num_updates"] = trainer.get_num_updates()
    stats["lr"] = trainer.get_lr()
    stats["gnorm"] = "{:.3f}".format(trainer.get_meter("gnorm").avg)  # gradient norm
    stats["clip"] = "{:.0%}".format(trainer.get_meter("clip").avg)    # fraction of updates clipped
    stats["oom"] = trainer.get_meter("oom").avg                       # out-of-memory events
    if trainer.get_meter("loss_scale") is not None:
        # Only present when training with FP16 dynamic loss scaling.
        stats["loss_scale"] = "{:.3f}".format(trainer.get_meter("loss_scale").avg)
    stats["wall"] = round(trainer.get_meter("wall").elapsed_time)
    stats["train_wall"] = round(trainer.get_meter("train_wall").sum)
    return stats
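
The `ppl` entry depends on a `get_perplexity` helper defined elsewhere in the driver script. A minimal sketch of such a helper, assuming the base-2 perplexity convention used in fairseq's training script, might look like this (the exact format string is an assumption for illustration):

def get_perplexity(loss):
    import math

    # Perplexity as 2**loss, guarded against overflow for very large losses.
    try:
        return "{:.2f}".format(math.pow(2, loss))
    except OverflowError:
        return float("inf")

In the training loop, the dict returned by get_training_stats is typically passed to the progress bar's log/print calls so the metrics appear in the training output.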