src/evaluator.py [459:470]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            alen = torch.arange(len2.max(), dtype=torch.long, device=len2.device)
            pred_mask = (
                alen[:, None] < len2[None] - 1
            )  # do not predict anything given the last target word
            y = x2[1:].masked_select(pred_mask[:-1])  # targets: the token following each position selected by pred_mask
            assert len(y) == (len2 - 1).sum().item()

            # optionally truncate input
            x1_, len1_ = x1, len1

            # cuda
            x1_, len1_, x2, len2, y = to_cuda(x1_, len1_, x2, len2, y)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
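
The excerpt above builds the next-token prediction targets for evaluation. A minimal standalone sketch of how the mask behaves on a toy column-major batch (token ids, lengths, and the padding value 0 are made up for illustration):

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch

# Two sequences stored column-wise, lengths 5 and 3, padded to the same length.
x2 = torch.tensor([
    [10, 20],
    [11, 21],
    [12, 22],   # second sequence ends here (length 3)
    [13,  0],
    [14,  0],   # first sequence ends here (length 5)
])
len2 = torch.tensor([5, 3])

alen = torch.arange(len2.max(), dtype=torch.long)
pred_mask = alen[:, None] < len2[None] - 1  # (slen, bs): True wherever a next token must be predicted

# x2[1:] holds the "next token" for every position; pred_mask[:-1] aligns with it.
y = x2[1:].masked_select(pred_mask[:-1])

print(pred_mask.t())
# tensor([[ True,  True,  True,  True, False],
#         [ True,  True, False, False, False]])
print(y)  # tensor([11, 21, 12, 22, 13, 14])
assert len(y) == (len2 - 1).sum().item()  # 4 + 2 = 6
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

masked_select flattens in row-major order, so y interleaves the sequences position by position; the assert mirrors the one in the evaluator, since each sequence contributes exactly len2 - 1 targets.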



src/evaluator.py [594:605]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            alen = torch.arange(len2.max(), dtype=torch.long, device=len2.device)
            pred_mask = (
                alen[:, None] < len2[None] - 1
            )  # do not predict anything given the last target word
            y = x2[1:].masked_select(pred_mask[:-1])  # targets: the token following each position selected by pred_mask
            assert len(y) == (len2 - 1).sum().item()

            # optionally truncate input
            x1_, len1_ = x1, len1

            # cuda
            x1_, len1_, x2, len2, y = to_cuda(x1_, len1_, x2, len2, y)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
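
to_cuda is a helper defined elsewhere in the repository. A minimal sketch of the behaviour the call above relies on (an assumption, not the repository's actual implementation): move each tensor argument to the GPU and pass None entries through unchanged.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch

def to_cuda(*args):
    """Sketch of an assumed helper: move every tensor to the default GPU,
    leave None entries untouched, and fall back to a no-op on CPU-only machines."""
    if not torch.cuda.is_available():
        return list(args)
    return [None if x is None else x.cuda() for x in args]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -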



