in pytext/models/seq_models/conv_encoder.py [0:0]
def __init__(self, dictionary, embed_tokens, layers, encoder_config):
    super().__init__()
    self.dropout = encoder_config.dropout
    input_embed_dim = embed_tokens.embedding_dim
    self.padding_idx = dictionary.get_pad_index()
    self.max_source_positions = encoder_config.max_source_positions
    self.embed_scale = math.sqrt(input_embed_dim)  # todo: try with input_embed_dim
    self.no_token_positional_embeddings = (
        encoder_config.no_token_positional_embeddings
    )
    # This module is also created conditionally: project token embeddings up
    # to the encoder width when the dims differ, otherwise pass through.
    self.project_in_dim = (
        Linear(input_embed_dim, encoder_config.encoder_embed_dim)
        if encoder_config.encoder_embed_dim != input_embed_dim
        else PlaceholderIdentity()
    )
    self.embed_layer_norm = LayerNorm(encoder_config.encoder_embed_dim)
    self.combine_pos_embed = encoder_config.combine_pos_embed.value
    # The positional-embedding width depends on how it is combined with the
    # token embedding: SUM needs the full encoder width so the tensors can be
    # added, while CONCAT only fills the remaining width so that
    # [token; position] comes out encoder_embed_dim wide.
    if encoder_config.combine_pos_embed == PostionalEmbedCombine.SUM:
        pos_embed_dim = encoder_config.encoder_embed_dim
    elif encoder_config.combine_pos_embed == PostionalEmbedCombine.CONCAT:
        pos_embed_dim = encoder_config.encoder_embed_dim - input_embed_dim
    else:
        raise NotImplementedError
    if not encoder_config.no_token_positional_embeddings:
        # LEARNED uses a trained embedding table; SINUSOIDAL and HYBRID share
        # the sinusoidal module, with HYBRID passing learned_embed=True.
        if encoder_config.positional_embedding_type == PostionalEmbedType.LEARNED:
            self.embed_positions = PositionalEmbedding(
                encoder_config.max_source_positions,
                pos_embed_dim,
                self.padding_idx,
            )
        elif (
            encoder_config.positional_embedding_type == PostionalEmbedType.SINUSOIDAL
            or encoder_config.positional_embedding_type == PostionalEmbedType.HYBRID
        ):
            self.embed_positions = SinusoidalPositionalEmbedding(
                pos_embed_dim,
                self.padding_idx,
                init_size=encoder_config.max_source_positions,
                learned_embed=encoder_config.positional_embedding_type
                == PostionalEmbedType.HYBRID,
            )
        else:
            raise NotImplementedError("Positional embedding type not supported")
    else:
        self.embed_positions = PlaceholderIdentity()
    self.layers = nn.ModuleList(layers)
    self.normalize = encoder_config.encoder_normalize_before
    if self.normalize:
        self.layer_norm = LayerNorm(encoder_config.encoder_embed_dim)
    else:
        self.layer_norm = PlaceholderIdentity()
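
# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of conv_encoder.py) illustrating
# the pos_embed_dim arithmetic above. Combine and pos_embed_dim are
# hypothetical stand-ins for PostionalEmbedCombine and the local variable in
# __init__: with SUM the positional embedding must match encoder_embed_dim,
# while with CONCAT it only fills the width left after the token embedding.
from enum import Enum


class Combine(Enum):  # stand-in for PostionalEmbedCombine
    SUM = "sum"
    CONCAT = "concat"


def pos_embed_dim(combine, encoder_embed_dim, input_embed_dim):
    if combine == Combine.SUM:
        return encoder_embed_dim
    elif combine == Combine.CONCAT:
        return encoder_embed_dim - input_embed_dim
    raise NotImplementedError


assert pos_embed_dim(Combine.SUM, 512, 512) == 512
assert pos_embed_dim(Combine.CONCAT, 512, 384) == 128  # 384 + 128 == 512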