in src/model.py
def init_concat_lstm(model_config, vocab_size):
    """Construct a ConcatLSTM from a parsed config section and the vocabulary size."""
    dimension_params = {
        "emb_dim": int(model_config.get("emb_dim")),
        # context_dim defaults to emb_dim when it is not set explicitly
        "context_dim": int(model_config.get("context_dim",
                                             fallback=model_config.get("emb_dim"))),
        "hidden_dim": int(model_config.get("hidden_dim")),
        "vocab_size": vocab_size,
    }
    metadata_constructor_params = {
        "md_projection_dim": int(model_config.get("md_projection_dim", fallback="50")),
        "md_dims": [int(x) for x in model_config.get("md_dims").split(",")],
        "md_group_sizes": [int(x) for x in model_config.get("md_group_sizes").split(",")],
        "attention_mechanism": model_config.get("attention_mechanism", fallback=""),
        "query_type": model_config.get("query_type", fallback=""),
        # boolean options are parsed with configparser's getboolean rather than eval
        "use_null_token": model_config.getboolean("use_null_token", fallback=False),
        "hierarchical_attention": model_config.getboolean("hierarchical_attention", fallback=False),
    }
    layer_params = {
        "n_layers": 1,  # fixed at 1, as in the ACL paper
        "use_softmax_adaptation": model_config.getboolean("use_softmax_adaptation", fallback=False),
        "use_layernorm": model_config.getboolean("use_layernorm", fallback=False),
        "use_weight_tying": model_config.getboolean("use_weight_tying", fallback=False),
    }
    model = ConcatLSTM(dimension_params, metadata_constructor_params, layer_params)
    return model
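
A minimal usage sketch, assuming model_config is a configparser section and that ConcatLSTM is defined in this module; the section name and option values below are hypothetical:

# Hypothetical example: build the model from an in-memory INI config.
import configparser

_cfg = configparser.ConfigParser()
_cfg.read_string("""
[model]
emb_dim = 300
hidden_dim = 512
md_dims = 16,8
md_group_sizes = 4,2
attention_mechanism = dot
use_weight_tying = True
""")
model = init_concat_lstm(_cfg["model"], vocab_size=10000)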