ludwig/encoders/sequence_encoders.py [471:512]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        if self.reduce_output is not None:
            logger.debug('  FCStack')
            self.fc_stack = FCStack(
                layers=fc_layers,
                num_layers=num_fc_layers,
                default_fc_size=fc_size,
                default_use_bias=use_bias,
                default_weights_initializer=weights_initializer,
                default_bias_initializer=bias_initializer,
                default_weights_regularizer=weights_regularizer,
                default_bias_regularizer=bias_regularizer,
                default_activity_regularizer=activity_regularizer,
                # default_weights_constraint=weights_constraint,
                # default_bias_constraint=bias_constraint,
                default_norm=norm,
                default_norm_params=norm_params,
                default_activation=activation,
                default_dropout=dropout,
            )

    def call(self, inputs, training=None, mask=None):
        """
            :param inputs: The input sequence fed into the encoder.
                   Shape: [batch x sequence length], type tf.int
            :type inputs: Tensor
            :param training: bool specifying if in training mode (important for dropout)
            :type training: bool
        """
        # ================ Embeddings ================
        if self.should_embed:
            embedded_sequence = self.embed_sequence(
                inputs, training=training, mask=mask
            )
        else:
            embedded_sequence = inputs
            while len(embedded_sequence.shape) < 3:
                embedded_sequence = tf.expand_dims(embedded_sequence, -1)

        # shape=(?, sequence_length, embedding_size)
        hidden = embedded_sequence
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
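
A minimal sketch (assuming TensorFlow 2.x eager execution) of the rank-promotion loop above: a non-embedded [batch x sequence_length] tensor is expanded with trailing size-1 axes until it matches the rank of the embedded path's [batch x sequence_length x embedding_size] output.

import tensorflow as tf

# Non-embedded input: shape (2, 3) = batch x sequence_length
inputs = tf.constant([[0.1, 0.2, 0.3],
                      [0.4, 0.5, 0.6]])
hidden = inputs
while len(hidden.shape) < 3:
    hidden = tf.expand_dims(hidden, -1)  # append a size-1 feature axis
print(hidden.shape)  # (2, 3, 1) -- rank 3, with embedding_size == 1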



ludwig/encoders/sequence_encoders.py [822:869]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        if self.reduce_output is not None:
            logger.debug('  FCStack')
            self.fc_stack = FCStack(
                layers=fc_layers,
                num_layers=num_fc_layers,
                default_fc_size=fc_size,
                default_use_bias=use_bias,
                default_weights_initializer=weights_initializer,
                default_bias_initializer=bias_initializer,
                default_weights_regularizer=weights_regularizer,
                default_bias_regularizer=bias_regularizer,
                default_activity_regularizer=activity_regularizer,
                # default_weights_constraint=weights_constraint,
                # default_bias_constraint=bias_constraint,
                default_norm=norm,
                default_norm_params=norm_params,
                default_activation=activation,
                default_dropout=dropout,
            )

    def call(self, inputs, training=None, mask=None):
        """
            :param input_sequence: The input sequence fed into the encoder.
                   Shape: [batch x sequence length], type tf.int32
            :type input_sequence: Tensor
            :param regularizer: The regularizer to use for the weights
                   of the encoder.
            :type regularizer:
            :param dropout: Tensor (tf.float) of the probability of dropout
            :type dropout: Tensor
            :param is_training: Tesnor (tf.bool) specifying if in training mode
                   (important for dropout)
            :type is_training: Tensor
        """
        # ================ Embeddings ================
        if self.should_embed:
            embedded_sequence = self.embed_sequence(
                inputs, training=training, mask=mask
            )
        else:
            embedded_sequence = inputs
            while len(embedded_sequence.shape) < 3:
                embedded_sequence = tf.expand_dims(embedded_sequence, -1)

        # shape=(?, sequence_length, embedding_size)
        hidden = embedded_sequence
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
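
A hedged sketch of the FCStack construction pattern above: each default_* keyword supplies a fallback that individual layer dicts in `layers` can override. The import path here is an assumption based on Ludwig's TF2 module layout; it is not shown in this excerpt.

from ludwig.modules.fully_connected_modules import FCStack  # assumed import path

fc_stack = FCStack(
    layers=[
        {'fc_size': 128},                       # inherits default_activation
        {'fc_size': 64, 'activation': 'tanh'},  # overrides the default
    ],
    default_fc_size=256,        # used only when a layer dict omits fc_size
    default_activation='relu',
    default_dropout=0.1,
)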



ludwig/encoders/sequence_encoders.py [1164:1211]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        if self.reduce_output is not None:
            logger.debug('  FCStack')
            self.fc_stack = FCStack(
                layers=fc_layers,
                num_layers=num_fc_layers,
                default_fc_size=fc_size,
                default_use_bias=use_bias,
                default_weights_initializer=weights_initializer,
                default_bias_initializer=bias_initializer,
                default_weights_regularizer=weights_regularizer,
                default_bias_regularizer=bias_regularizer,
                default_activity_regularizer=activity_regularizer,
                # default_weights_constraint=weights_constraint,
                # default_bias_constraint=bias_constraint,
                default_norm=norm,
                default_norm_params=norm_params,
                default_activation=activation,
                default_dropout=dropout,
            )

    def call(self, inputs, training=None, mask=None):
        """
            :param inputs: The input sequence fed into the encoder.
                   Shape: [batch x sequence length], type tf.int32
            :type inputs: Tensor
            :param regularizer: The regularizer to use for the weights
                   of the encoder.
            :type regularizer:
            :param dropout: Tensor (tf.float) of the probability of dropout
            :type dropout: Tensor
            :param is_training: Tesnor (tf.bool) specifying if in training mode
                   (important for dropout)
            :type is_training: Tensor
        """
        # ================ Embeddings ================
        if self.should_embed:
            embedded_sequence = self.embed_sequence(
                inputs, training=training, mask=mask
            )
        else:
            embedded_sequence = inputs
            while len(embedded_sequence.shape) < 3:
                embedded_sequence = tf.expand_dims(embedded_sequence, -1)

        # shape=(?, sequence_length, embedding_size)
        hidden = embedded_sequence
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
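
A hypothetical usage sketch of the call() contract documented above: a [batch x sequence_length] tf.int32 tensor of token ids, with the Keras-style training flag controlling dropout. `encoder` stands in for any of the sequence encoders in this file and is not a name from the excerpt.

import tensorflow as tf

# Token ids: shape (2, 4) = batch x sequence_length, dtype tf.int32
token_ids = tf.constant([[5, 9, 2, 0],
                         [7, 1, 0, 0]], dtype=tf.int32)

# hidden = encoder(token_ids, training=True)   # training mode: dropout active
# hidden = encoder(token_ids, training=False)  # inference mode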



