in question_generation_model.py [0:0]
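# Imports assumed by this snippet (a sketch; the project may organize these
# differently, e.g. via `from tensorflow.keras import layers`):
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import Input, Dense, Dropout, Embedding, LSTM, Add
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam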
def build_keyword_model(self):
"""
Build model definition using GloVe embeddings and image, keyword input
:return:
"""
self.logger.debug('In keyword model')
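# Architecture: three branches (image feature vector, partial question so far,
# keyword sequence) are each projected/encoded into a hidden_units-dim vector,
# summed element-wise, and decoded into a softmax over the next question word.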
# image feature
inputs1 = Input(shape=(self.input_shape,))
fe1 = Dropout(self.dropout)(inputs1)
fe2 = Dense(self.hidden_units, activation='relu')(fe1)
# partial question sequence model
inputs2 = Input(shape=(self.datasets.max_question_len,))
# GloVe weights fixed at construction (embedding_matrix: vocab_size x embedding_dim)
se1 = Embedding(self.vocab_size, self.embedding_dim, mask_zero=True,
                embeddings_initializer=Constant(self.embedding_matrix),
                trainable=False)(inputs2)
se2 = Dropout(self.dropout)(se1)
question_seq_model = LSTM(self.hidden_units)(se2)
# alternative: question_seq_model = Bidirectional(LSTM(self.hidden_units))(se2)
# keyword sequence model
inputs3 = Input(shape=(self.datasets.max_keyword_len,))
k1 = Embedding(self.vocab_size, self.embedding_dim, mask_zero=True,
               embeddings_initializer=Constant(self.embedding_matrix),
               trainable=False)(inputs3)
k2 = Dropout(self.dropout)(k1)
keyword_seq_model = LSTM(self.hidden_units)(k2)
# decoder (feed forward) model
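# Add() sums the branch outputs element-wise, so all three must share the same
# dimensionality (hidden_units); concatenation would be the usual alternative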
decoder1 = Add()([fe2, question_seq_model, keyword_seq_model])
decoder2 = layers.Dense(self.hidden_units, activation='relu')(decoder1)
outputs = layers.Dense(self.vocab_size, activation='softmax')(decoder2)
# merge the three input branches
model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
# embedding weights are loaded from GloVe and frozen in the Embedding layers above;
# indexing model.layers[3]/[4] is fragile because layer order depends on graph topology
model.summary()
optimizer = Adam(learning_rate=0.001)  # beta_1=0.9, beta_2=0.999, amsgrad=False are the defaults
# optimizer = SGD()
# TODO: try a different optimizer?
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
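
# Usage sketch (hypothetical; the variable names below are illustrative, and the
# surrounding class is assumed to supply input_shape, vocab_size, dropout,
# hidden_units, embedding_matrix, and datasets). Each training sample pairs an
# image feature vector, a zero-padded partial question, and zero-padded keywords
# with a one-hot encoding of the next question word:
#
#   model = self.build_keyword_model()
#   model.fit(
#       [image_features,      # shape (N, input_shape)
#        question_sequences,  # shape (N, max_question_len), padded word indices
#        keyword_sequences],  # shape (N, max_keyword_len), padded word indices
#       next_word_onehot,     # shape (N, vocab_size)
#       epochs=10, batch_size=64)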