src/model.py
def init_base_lstm(model_config, vocab_size):
    # Core dimensions; the context vectors share the embedding dimensionality.
    dimension_params = {
        "emb_dim": int(model_config.get("emb_dim")),
        "context_dim": int(model_config.get("emb_dim")),
        "hidden_dim": int(model_config.get("hidden_dim")),
        "vocab_size": vocab_size,
    }
    # Metadata settings: comma-separated config values are parsed into int lists,
    # and the empty strings disable both attention variants for the base model.
    metadata_constructor_params = {
        "md_projection_dim": int(model_config.get("md_projection_dim", fallback="50")),
        "md_dims": [int(x) for x in model_config.get("md_dims", fallback="").split(",") if x],
        "md_group_sizes": [int(x) for x in model_config.get("md_group_sizes", fallback="").split(",") if x],
        "attention_mechanism": "",
        "hierarchical_attention": "",
    }
    layer_params = {
        "n_layers": 1,  # Fixed in ACL paper
        # getboolean parses the flag safely instead of calling eval() on a raw config string.
        "use_weight_tying": model_config.getboolean("use_weight_tying", fallback=False),
    }
    model = BaseLSTM(dimension_params, metadata_constructor_params, layer_params)
    return model
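
For reference, here is a minimal sketch of how this initializer could be driven from an INI-style config via configparser. The section name "model", the field values, and the vocab size are hypothetical placeholders, and it assumes BaseLSTM is defined alongside init_base_lstm in this module.

import configparser

# Hypothetical config; only the keys read by init_base_lstm matter here.
example_config = """
[model]
emb_dim = 300
hidden_dim = 512
md_dims = 10,20
md_group_sizes = 2,4
use_weight_tying = True
"""

parser = configparser.ConfigParser()
parser.read_string(example_config)
# Passing the section proxy gives init_base_lstm the .get/.getboolean API it expects.
model = init_base_lstm(parser["model"], vocab_size=10000)  # vocab size is illustrative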