ludwig/encoders/sequence_encoders.py [1428:1492]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            bidirectional=bidirectional,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            unit_forget_bias=unit_forget_bias,
            weights_initializer=weights_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            weights_regularizer=weights_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            # kernel_constraint=kernel_constraint,
            # recurrent_constraint=recurrent_constraint,
            # bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
        )

        if self.reduce_output is not None:
            logger.debug('  FCStack')
            self.fc_stack = FCStack(
                layers=fc_layers,
                num_layers=num_fc_layers,
                default_fc_size=fc_size,
                default_use_bias=use_bias,
                default_weights_initializer=weights_initializer,
                default_bias_initializer=bias_initializer,
                default_weights_regularizer=weights_regularizer,
                default_bias_regularizer=bias_regularizer,
                default_activity_regularizer=activity_regularizer,
                # default_weights_constraint=weights_constraint,
                # default_bias_constraint=bias_constraint,
                default_norm=norm,
                default_norm_params=norm_params,
                default_activation=fc_activation,
                default_dropout=fc_dropout,
            )

    def call(self, inputs, training=None, mask=None):
        """
            :param input_sequence: The input sequence fed into the encoder.
                   Shape: [batch x sequence length], type tf.int32
            :type input_sequence: Tensor
            :param regularizer: The regularizer to use for the weights
                   of the encoder.
            :type regularizer:
            :param dropout: Tensor (tf.float) of the probability of dropout
            :type dropout: Tensor
            :param is_training: Tesnor (tf.bool) specifying if in training mode
                   (important for dropout)
            :type is_training: Tensor
        """
        # ================ Embeddings ================
        if self.should_embed:
            embedded_sequence = self.embed_sequence(
                inputs, training=training, mask=mask
            )
        else:
            embedded_sequence = inputs
            while len(embedded_sequence.shape) < 3:
                embedded_sequence = tf.expand_dims(embedded_sequence, -1)

        # shape=(?, sequence_length, embedding_size)
        hidden = embedded_sequence
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
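
The preamble above promotes non-embedded rank-2 inputs to rank 3 so that
downstream layers always see a [batch, sequence_length, features] tensor.
A minimal illustration of that loop, assuming TF2 eager mode (the shapes
below are made up for the example):

    import tensorflow as tf

    x = tf.zeros([32, 20])         # [batch, sequence_length]
    while len(x.shape) < 3:
        x = tf.expand_dims(x, -1)  # appends a feature axis -> [32, 20, 1]
    print(x.shape)                 # (32, 20, 1)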



ludwig/encoders/sequence_encoders.py [1735:1799]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            bidirectional=bidirectional,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            unit_forget_bias=unit_forget_bias,
            weights_initializer=weights_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            weights_regularizer=weights_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            # kernel_constraint=kernel_constraint,
            # recurrent_constraint=recurrent_constraint,
            # bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
        )

        if self.reduce_output is not None:
            logger.debug('  FCStack')
            self.fc_stack = FCStack(
                layers=fc_layers,
                num_layers=num_fc_layers,
                default_fc_size=fc_size,
                default_use_bias=use_bias,
                default_weights_initializer=weights_initializer,
                default_bias_initializer=bias_initializer,
                default_weights_regularizer=weights_regularizer,
                default_bias_regularizer=bias_regularizer,
                default_activity_regularizer=activity_regularizer,
                # default_weights_constraint=weights_constraint,
                # default_bias_constraint=bias_constraint,
                default_norm=norm,
                default_norm_params=norm_params,
                default_activation=fc_activation,
                default_dropout=fc_dropout,
            )

    def call(self, inputs, training=None, mask=None):
        """
            :param input_sequence: The input sequence fed into the encoder.
                   Shape: [batch x sequence length], type tf.int32
            :type input_sequence: Tensor
            :param regularizer: The regularizer to use for the weights
                   of the encoder.
            :type regularizer:
            :param dropout: Tensor (tf.float) of the probability of dropout
            :type dropout: Tensor
            :param is_training: Tesnor (tf.bool) specifying if in training mode
                   (important for dropout)
            :type is_training: Tensor
        """
        # ================ Embeddings ================
        if self.should_embed:
            embedded_sequence = self.embed_sequence(
                inputs, training=training, mask=mask
            )
        else:
            embedded_sequence = inputs
            while len(embedded_sequence.shape) < 3:
                embedded_sequence = tf.expand_dims(embedded_sequence, -1)

        # shape=(?, sequence_length, embedding_size)
        hidden = embedded_sequence
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
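
The two fragments are verbatim duplicates: the same constructor tail and the
same embed-or-expand preamble of call(). One way to remove the duplication is
a shared helper on a common base class. This is a hypothetical sketch only,
not Ludwig's actual API; the name _embed_or_expand is an assumption:

    import tensorflow as tf

    def _embed_or_expand(encoder, inputs, training=None, mask=None):
        # Hypothetical helper: returns a rank-3 [batch, seq_len, features]
        # tensor whether or not the encoder embeds its inputs.
        if encoder.should_embed:
            return encoder.embed_sequence(
                inputs, training=training, mask=mask
            )
        hidden = inputs
        while len(hidden.shape) < 3:
            hidden = tf.expand_dims(hidden, -1)
        return hidden

Both encoders' call() methods could then open with
hidden = _embed_or_expand(self, inputs, training=training, mask=mask),
leaving only the encoder-specific logic in each class.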



