def forward()

in custom/baseline_cross_entropy.py
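(The excerpt relies on the file's module-level imports, which are not shown here: torch.nn.functional as F, fairseq's utils, and the repo's TrainingMetrics helper.)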


    def forward(self, model, sample, reduce=True, compute_custom_metrics=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        logits = net_output[0].view(-1, net_output[0].size(-1))
        target = model.get_targets(sample, net_output)
        target = target.view(-1)
        loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
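        # Gradient denominator: sentence count or token count, depending on
        # the --sentence-avg flag.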
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']

        # With reduction='none', -F.nll_loss simply gathers the logit of each
        # true target token (padding positions yield 0 via ignore_index).
        true_token_logits = -F.nll_loss(
            logits,
            target,
            ignore_index=self.padding_idx,
            reduction='none',
        )
        # Count only the non-padding target tokens actually scored.
        orig = utils.strip_pad(target, self.padding_idx)
        ntokens = orig.numel()

        # Standard fairseq logging outputs; custom ranking metrics are merged
        # in below when requested.
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        if compute_custom_metrics:
            custom_output = TrainingMetrics.ranking_metrics(logits, true_token_logits, sample, ntokens, target)
            logging_output.update(custom_output)
        return loss, sample_size, logging_output
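
The -F.nll_loss call above is doing gathering, not loss computation: with reduction='none', nll_loss returns -input[i, target[i]] per position, so negating it recovers each true token's logit. A minimal, self-contained sketch of the trick (torch only; the shapes and padding index here are illustrative assumptions, not values from this file):

    import torch
    import torch.nn.functional as F

    padding_idx = 1                        # hypothetical pad id
    logits = torch.randn(8, 100)           # (batch * seq_len, vocab_size)
    target = torch.randint(2, 100, (8,))   # token ids; none equal padding_idx

    # Negated per-position NLL == the logit of each target token.
    true_token_logits = -F.nll_loss(
        logits,
        target,
        ignore_index=padding_idx,
        reduction='none',
    )

    # Equivalent explicit gather.
    gathered = logits.gather(1, target.unsqueeze(1)).squeeze(1)
    assert torch.allclose(true_token_logits, gathered)

Note that padding positions come back as 0 rather than the actual logit, since ignore_index zeroes their contribution, so any downstream metric has to mask them out.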