def forward()

in custom/candidate_penalty_ce_loss.py
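Token-level candidate-penalty cross-entropy: the returned loss is the standard cross-entropy (MLE) term plus rank_alpha times a penalty of -log(1 - p(x_neg)), summed over negative candidate tokens drawn from each timestep's previous context.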


    def forward(self, model, sample, reduce=True, compute_custom_metrics=True):
        net_output = model(**sample['net_input'])
        target = model.get_targets(sample, net_output)
        nsentences = target.size(0)
        target = target.view(-1)

        # -- mle loss
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
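        # N.B. F.nll_loss negates the gathered log-probs, so despite its name
        # true_token_lprobs holds -log p(x_t); the sum below is the standard
        # cross-entropy (MLE) loss.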
        true_token_lprobs = F.nll_loss(
            lprobs,
            target,
            ignore_index=self.padding_idx,
            reduction='none',
        )
        mle_loss = true_token_lprobs.sum()

        # -- custom loss
        # Maximize (1 - p(x_nt)) for negative target tokens x_nt (equivalently minimize -log(1-p(x_nt)))

        # - form negative targets
        with torch.no_grad():
            # E.g. DABCC | D | EFFGD => {A,B,C} are negative targets.
            if self.candidate_type == 'prev_context':
                # Build a (T x T) "triangle": row t ends up holding the previous
                # tokens x_<t, with padding_idx everywhere else.
                ctx_cands = target.unsqueeze(0).expand(target.size(0), target.size(0))
                ctx_cands_ = (ctx_cands.tril(-1) + self.padding_idx)
                ctx_cands_ = ctx_cands_ * ctx_cands_.triu()
                # Cells on/above the diagonal are now padding_idx**2, which equals
                # padding_idx under fairseq's default padding_idx == 1.
                ctx_cands = ctx_cands.tril(-1) + ctx_cands_

                # Don't include the target for that timestep as a negative target.
                ctx_cands = ctx_cands.masked_fill(ctx_cands == target.unsqueeze(1), self.padding_idx)
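                # Scatter into a |V|-sized multi-hot mask: row t gets a 1 at each
                # negative-candidate id. Note that scatter_ also writes a 1 at the
                # padding_idx column (ctx_cands is padded with it), so the pad
                # token receives a small penalty as well.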
                negative_targets = torch.zeros_like(lprobs).scatter_(1, ctx_cands, 1)
            else:
                raise NotImplementedError('candidate type %s' % self.candidate_type)

        # - compute loss
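        # The clamp guards against log(0) when the model puts probability ~1
        # on a negative candidate.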
        one_minus_probs = torch.clamp((1.0 - lprobs.exp()), min=1e-5)

        custom_loss = -torch.log(one_minus_probs) * negative_targets
        custom_loss = custom_loss.sum()

        # Combined objective: cross-entropy plus the weighted candidate penalty.
        loss = mle_loss + self.rank_alpha * custom_loss

        # -- metrics
        logits = net_output[0].view(-1, net_output[0].size(-1))
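        # Reuse nll_loss as a gather: with reduction='none' it returns the
        # negated entry at each target index, so the leading minus recovers
        # the gold token's logit.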
        true_token_logits = -F.nll_loss(
            logits,
            target,
            ignore_index=self.padding_idx,
            reduction='none',
        )

        orig = utils.strip_pad(target, self.padding_idx)
        ntokens = orig.numel()
        # Normalize per sentence when --sentence-avg is set, else per token.
        sample_size = sample['target'].size(0) if self.args.sentence_avg else ntokens

        logging_output = {
            'custom_loss': utils.item(custom_loss.data),
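            # 'loss' logs only the MLE term; the combined objective is the
            # first return value.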
            'loss': utils.item(mle_loss.data),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if compute_custom_metrics:
            custom_output = TrainingMetrics.ranking_metrics(logits, true_token_logits, sample, ntokens, target)
            for k, v in custom_output.items():
                logging_output[k] = v

        return loss, sample_size, logging_output
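
A minimal standalone sketch of the 'prev_context' construction above, runnable outside the criterion (toy target values; assumes fairseq's default padding_idx == 1, and the 10-word vocab is made up):

    import torch

    padding_idx = 1                      # fairseq's default pad index
    target = torch.tensor([5, 7, 5, 9])  # toy flattened targets
    T = target.size(0)

    ctx_cands = target.unsqueeze(0).expand(T, T)
    ctx_cands_ = ctx_cands.tril(-1) + padding_idx
    ctx_cands_ = ctx_cands_ * ctx_cands_.triu()
    ctx_cands = ctx_cands.tril(-1) + ctx_cands_
    ctx_cands = ctx_cands.masked_fill(ctx_cands == target.unsqueeze(1), padding_idx)
    print(ctx_cands)
    # tensor([[1, 1, 1, 1],
    #         [5, 1, 1, 1],
    #         [1, 7, 1, 1],    <- x_0 == 5 equals this row's target, so it is masked
    #         [5, 7, 5, 1]])

    # Multi-hot negative-target mask over the toy vocab; column 1 (pad)
    # ends up set in every row, as noted in the forward pass above.
    negative_targets = torch.zeros(T, 10).scatter_(1, ctx_cands, 1)

    # The penalty -log(1 - p) grows sharply as a negative candidate gains mass:
    for p in (0.01, 0.5, 0.99):
        print(p, -torch.log(torch.clamp(torch.tensor(1.0 - p), min=1e-5)).item())
    # 0.01 -> ~0.01,  0.5 -> ~0.69,  0.99 -> ~4.61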