in custom/baseline_cross_entropy.py [0:0]
import math


def aggregate_logging_outputs(logging_outputs):
    """Aggregate logging outputs from data parallel training."""
    loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
    ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
    nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
    sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
    agg_output = {
        # Normalize the summed loss by sample_size and convert to bits (base 2).
        'loss': loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.,
        'ntokens': ntokens,
        'nsentences': nsentences,
        'sample_size': sample_size,
    }
    from fairseq.custom.metrics import TrainingMetrics
    # Merge custom metrics computed by TrainingMetrics into the aggregate.
    custom_output = TrainingMetrics.aggregate_and_normalize(logging_outputs)
    for k, v in custom_output.items():
        agg_output[k] = v
    # If sample_size is not token-based, also report a per-token NLL in bits.
    if sample_size != ntokens:
        agg_output['nll_loss'] = loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.
    return agg_output
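

# Usage sketch (illustrative, not part of the original file): each data-parallel
# worker contributes one logging dict, and the aggregator sums them before
# normalizing. This assumes fairseq is importable and that
# TrainingMetrics.aggregate_and_normalize accepts logging dicts containing only
# the standard keys shown here; the real logging outputs may carry extra fields.
if __name__ == '__main__':
    logging_outputs = [
        {'loss': 10.4, 'ntokens': 512, 'nsentences': 16, 'sample_size': 512},
        {'loss': 9.8, 'ntokens': 480, 'nsentences': 16, 'sample_size': 480},
    ]
    agg = aggregate_logging_outputs(logging_outputs)
    # 'loss' here is (10.4 + 9.8) / (512 + 480) / ln(2), i.e. per-token loss in bits.
    print(agg['loss'], agg['sample_size'])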