in pytext/models/representations/lightconv.py
def __init__(self, config: Config, embed_dim: int, padding_idx: Tensor) -> None:
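"""Set up the embedding scale and optional input projection, the positional
embeddings, the stack of lightweight-convolution layers, and the optional
output LayerNorm."""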
super().__init__(config)
self.padding_idx = padding_idx
self.pooling_type = config.pooling_type
self.dropout = nn.Dropout(config.encoder_config.dropout)
input_embed_dim = embed_dim
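# Scale token embeddings by sqrt of the input embedding dim, as in the Transformer.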
self.embed_scale = math.sqrt(input_embed_dim) # todo: try with input_embed_dim
self.max_source_positions = config.encoder_config.max_source_positions
self.no_token_positional_embeddings = (
config.encoder_config.no_token_positional_embeddings
)
# Project the token embedding to the encoder width only when the two dims
# differ; otherwise keep an identity placeholder.
self.project_in_dim = (
Linear(input_embed_dim, config.encoder_config.encoder_embed_dim)
if config.encoder_config.encoder_embed_dim != input_embed_dim
else PlaceholderIdentity()
)
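# Build one lightweight-convolution layer per kernel size in the config;
# every layer uses the encoder embedding dim set just below.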
layers = []
# Overwrite config.layer_config.encoder_embed_dim so that it always matches
# config.encoder_config.encoder_embed_dim.
config.layer_config.encoder_embed_dim = config.encoder_config.encoder_embed_dim
for size in config.encoder_kernel_size_list:
layers.append(create_module(config.layer_config, kernel_size=size))
self.layers = nn.ModuleList(layers)
self.embed_layer_norm = LayerNorm(config.encoder_config.encoder_embed_dim)
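# The positional embedding is either summed with the token embedding (same
# width as the encoder) or concatenated to it (filling the remaining width).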
self.combine_pos_embed = config.encoder_config.combine_pos_embed.value
if config.encoder_config.combine_pos_embed == PostionalEmbedCombine.SUM:
pos_embed_dim = config.encoder_config.encoder_embed_dim
elif config.encoder_config.combine_pos_embed == PostionalEmbedCombine.CONCAT:
pos_embed_dim = config.encoder_config.encoder_embed_dim - input_embed_dim
else:
raise NotImplementedError("Positional embedding combine type not supported")
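# LEARNED: trained positional table; SINUSOIDAL: fixed sinusoidal encoding;
# HYBRID: sinusoidally initialized but learnable.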
if not config.encoder_config.no_token_positional_embeddings:
if (
config.encoder_config.positional_embedding_type
== PostionalEmbedType.LEARNED
):
self.embed_positions = PositionalEmbedding(
config.encoder_config.max_source_positions,
pos_embed_dim,
self.padding_idx,
)
elif (
config.encoder_config.positional_embedding_type
== PostionalEmbedType.SINUSOIDAL
or config.encoder_config.positional_embedding_type
== PostionalEmbedType.HYBRID
):
self.embed_positions = SinusoidalPositionalEmbedding(
pos_embed_dim,
self.padding_idx,
init_size=config.encoder_config.max_source_positions,
learned_embed=config.encoder_config.positional_embedding_type
== PostionalEmbedType.HYBRID,
)
else:
raise NotImplementedError("Positional embedding type not supported")
else:
self.embed_positions = PlaceholderIdentity()
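# When encoder_normalize_before is set, keep a final LayerNorm for the
# encoder; otherwise use an identity placeholder.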
self.normalize = config.encoder_config.encoder_normalize_before
if self.normalize:
self.layer_norm = LayerNorm(config.encoder_config.encoder_embed_dim)
else:
self.layer_norm = PlaceholderIdentity()
log_class_usage(__class__)