pytorch_translate/char_source_hybrid.py [262:356]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        # Variable tracker
        self.tracker = VariableTracker()
        # Initialize adversarial mode
        self.set_gradient_tracking_mode(False)
        self.set_embed_noising_mode(False)

        # disables sorting and word-length thresholding if True
        # (enables ONNX tracing of length-sorted input with batch_size = 1)
        self.onnx_export_model = False

    def prepare_for_onnx_export_(self):
        self.onnx_export_model = True

    def set_gradient_tracking_mode(self, mode=True):
        """This allows AdversarialTrainer to turn on retrain_grad when
        running adversarial example generation model."""
        self.tracker.reset()
        self.track_gradients = mode

    def set_embed_noising_mode(self, mode=True):
        """This allows adversarial trainer to turn on and off embedding noising
        layers. In regular training, this mode is off, and it is not included
        in forward pass.
        """
        self.embed_noising_mode = mode

    def forward(self, src_tokens, src_lengths, char_inds, word_lengths):
        self.tracker.reset()
        # char_inds has shape (batch_size, max_words_per_sent, max_word_len)
        bsz, seqlen, maxchars = char_inds.size()
        # char_cnn_encoder takes input (max_word_length, total_words)
        char_inds_flat = char_inds.view(-1, maxchars).t()
        # output (total_words, encoder_dim)
        char_cnn_output = self.char_cnn_encoder(char_inds_flat)
        x = char_cnn_output.view(bsz, seqlen, char_cnn_output.shape[-1])
        x = x.transpose(0, 1)  # (seqlen, bsz, char_cnn_output_dim)
        x = self.char_layer_norm(x)
        x = self.char_scale * x

        embedded_tokens = self.embed_tokens(src_tokens)
        # (seqlen, bsz, token_embed_dim)
        embedded_tokens = embedded_tokens.transpose(0, 1)
        embedded_tokens = self.word_layer_norm(embedded_tokens)
        embedded_tokens = self.word_scale * embedded_tokens
        x = torch.cat([x, embedded_tokens], dim=2)

        self.tracker.track(x, "token_embeddings", retain_grad=self.track_gradients)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if self.word_to_transformer_embed is not None:
            x = self.word_to_transformer_embed(x)
        positions = self.embed_positions(src_tokens)
        x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask (B x T)
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None

        x = self.transformer_encoder_given_embeddings(
            x=x, positions=positions, encoder_padding_mask=encoder_padding_mask
        )

        # tracing requires a tensor value
        if self.onnx_export_model and encoder_padding_mask is None:
            encoder_padding_mask = torch.Tensor([]).type_as(src_tokens)

        return x, src_tokens, encoder_padding_mask

    def reorder_encoder_out(self, encoder_out, new_order):
        (x, src_tokens, encoder_padding_mask) = encoder_out
        if x is not None:
            x = x.index_select(1, new_order)
        if src_tokens is not None:
            src_tokens = src_tokens.index_select(0, new_order)
        if encoder_padding_mask is not None:
            encoder_padding_mask = encoder_padding_mask.index_select(0, new_order)
        return (x, src_tokens, encoder_padding_mask)

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return self.embed_positions.max_positions

    def upgrade_state_dict(self, state_dict):
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            if "encoder.embed_positions.weights" in state_dict:
                del state_dict["encoder.embed_positions.weights"]
            state_dict["encoder.embed_positions._float_tensor"] = torch.FloatTensor(1)
        return state_dict
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
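The reshape round trip at the top of forward() is easy to get wrong, so here is a minimal, self-contained sketch of just the tensor plumbing: char_inds of shape (batch, words, chars) is flattened to (max_word_len, total_words) for the character CNN, and the per-word output is folded back to (seqlen, bsz, dim). The random tensors are stand-ins; the real char_cnn_encoder is not reproduced here.

import torch

bsz, seqlen, maxchars, dim = 2, 3, 5, 4
char_inds = torch.randint(0, 100, (bsz, seqlen, maxchars))

# (bsz, seqlen, maxchars) -> (maxchars, bsz * seqlen), one column per word
char_inds_flat = char_inds.view(-1, maxchars).t()
assert char_inds_flat.shape == (maxchars, bsz * seqlen)

# stand-in for char_cnn_encoder: one output vector per word
char_cnn_output = torch.randn(bsz * seqlen, dim)

# (total_words, dim) -> (bsz, seqlen, dim) -> (seqlen, bsz, dim)
x = char_cnn_output.view(bsz, seqlen, dim).transpose(0, 1)
assert x.shape == (seqlen, bsz, dim)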



pytorch_translate/char_source_transformer_model.py [264:357]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        # Variable tracker
        self.tracker = VariableTracker()
        # Initialize adversarial mode
        self.set_gradient_tracking_mode(False)
        self.set_embed_noising_mode(False)

        # disables sorting and word-length thresholding if True
        # (enables ONNX tracing of length-sorted input with batch_size = 1)
        self.onnx_export_model = False

    def prepare_for_onnx_export_(self):
        self.onnx_export_model = True

    def set_gradient_tracking_mode(self, mode=True):
        """This allows AdversarialTrainer to turn on retrain_grad when
        running adversarial example generation model."""
        self.tracker.reset()
        self.track_gradients = mode

    def set_embed_noising_mode(self, mode=True):
        """This allows adversarial trainer to turn on and off embedding noising
        layers. In regular training, this mode is off, and it is not included
        in forward pass.
        """
        self.embed_noising_mode = mode

    def forward(self, src_tokens, src_lengths, char_inds, word_lengths):
        self.tracker.reset()
        # char_inds has shape (batch_size, max_words_per_sent, max_word_len)
        bsz, seqlen, maxchars = char_inds.size()
        # char_cnn_encoder takes input (max_word_length, total_words)
        char_inds_flat = char_inds.view(-1, maxchars).t()
        # output (total_words, encoder_dim)
        char_cnn_output = self.char_cnn_encoder(char_inds_flat)
        x = char_cnn_output.view(bsz, seqlen, char_cnn_output.shape[-1])
        x = x.transpose(0, 1)  # (seqlen, bsz, char_cnn_output_dim)
        x = self.char_layer_norm(x)
        x = self.char_scale * x

        embedded_tokens = self.embed_tokens(src_tokens)
        # (seqlen, bsz, token_embed_dim)
        embedded_tokens = embedded_tokens.transpose(0, 1)
        embedded_tokens = self.word_layer_norm(embedded_tokens)
        embedded_tokens = self.word_scale * embedded_tokens
        x = torch.cat([x, embedded_tokens], dim=2)

        self.tracker.track(x, "token_embeddings", retain_grad=self.track_gradients)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if self.word_to_transformer_embed is not None:
            x = self.word_to_transformer_embed(x)
        positions = self.embed_positions(src_tokens)
        x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask (B x T)
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None

        x = self.transformer_encoder_given_embeddings(
            x=x, positions=positions, encoder_padding_mask=encoder_padding_mask
        )

        # tracing requires a tensor value
        if self.onnx_export_model and encoder_padding_mask is None:
            encoder_padding_mask = torch.Tensor([]).type_as(src_tokens)

        return x, src_tokens, encoder_padding_mask

    def reorder_encoder_out(self, encoder_out, new_order):
        (x, src_tokens, encoder_padding_mask) = encoder_out
        if x is not None:
            x = x.index_select(1, new_order)
        if src_tokens is not None:
            src_tokens = src_tokens.index_select(0, new_order)
        if encoder_padding_mask is not None:
            encoder_padding_mask = encoder_padding_mask.index_select(0, new_order)
        return (x, src_tokens, encoder_padding_mask)

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return self.embed_positions.max_positions

    def upgrade_state_dict(self, state_dict):
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            if "encoder.embed_positions.weights" in state_dict:
                del state_dict["encoder.embed_positions.weights"]
            state_dict["encoder.embed_positions._float_tensor"] = torch.FloatTensor(1)
        return state_dict
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
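reorder_encoder_out is called by beam search to shuffle or duplicate batch entries between decoding steps. A minimal sketch with toy tensors (hypothetical values, no model) showing why x is indexed on dim 1 while src_tokens and the B x T padding mask are indexed on dim 0:

import torch

T, B, C = 4, 3, 8
x = torch.randn(T, B, C)                 # T x B x C encoder states
src_tokens = torch.randint(1, 100, (B, T))
encoder_padding_mask = src_tokens.eq(0)  # B x T, all False here

# e.g. keep hypothesis 0 twice and hypothesis 2 once
new_order = torch.tensor([0, 0, 2])

x = x.index_select(1, new_order)         # batch dim of T x B x C
src_tokens = src_tokens.index_select(0, new_order)
encoder_padding_mask = encoder_padding_mask.index_select(0, new_order)

assert x.shape == (T, 3, C)
assert src_tokens.shape == (3, T)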



