in drqa/reader/rnn_reader.py [0:0]
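# This method assumes the usual module-level imports from the surrounding
# file (not shown in this excerpt): `torch`, `torch.nn as nn`, and the
# package-local `layers` helpers used below.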
def forward(self, x1, x1_f, x1_mask, x2, x2_mask):
    """Inputs:
    x1 = document word indices             [batch * len_d]
    x1_f = document word features indices  [batch * len_d * nfeat]
    x1_mask = document padding mask        [batch * len_d]
    x2 = question word indices             [batch * len_q]
    x2_mask = question padding mask        [batch * len_q]
    """
    # Embed both document and question
    x1_emb = self.embedding(x1)
    x2_emb = self.embedding(x2)
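    # (embedding lookup gives x1_emb [batch * len_d * emb_dim] and
    #  x2_emb [batch * len_q * emb_dim])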

    # Dropout on embeddings
    if self.args.dropout_emb > 0:
        x1_emb = nn.functional.dropout(x1_emb, p=self.args.dropout_emb,
                                       training=self.training)
        x2_emb = nn.functional.dropout(x2_emb, p=self.args.dropout_emb,
                                       training=self.training)
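        # (passing training=self.training makes dropout a no-op in eval mode)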

    # Form document encoding inputs
    drnn_input = [x1_emb]

    # Add attention-weighted question representation
    if self.args.use_qemb:
        x2_weighted_emb = self.qemb_match(x1_emb, x2_emb, x2_mask)
        drnn_input.append(x2_weighted_emb)
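        # (x2_weighted_emb attaches a question summary to each document
        #  token, matching x1_emb's [batch * len_d * emb_dim] shape)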

    # Add manual features
    if self.args.num_features > 0:
        drnn_input.append(x1_f)

    # Encode document with RNN
    doc_hiddens = self.doc_rnn(torch.cat(drnn_input, 2), x1_mask)
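    # (the input pieces are concatenated along dim 2, the per-token feature
    #  axis; doc_hiddens holds one contextualized vector per document token)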

    # Encode question with RNN + merge hiddens
    question_hiddens = self.question_rnn(x2_emb, x2_mask)
    if self.args.question_merge == 'avg':
        q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
    elif self.args.question_merge == 'self_attn':
        q_merge_weights = self.self_attn(question_hiddens, x2_mask)
    question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
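    # (question_hidden is a single per-example summary vector; any other
    #  question_merge value is assumed to be rejected at construction time)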

    # Predict start and end positions
    start_scores = self.start_attn(doc_hiddens, question_hidden, x1_mask)
    end_scores = self.end_attn(doc_hiddens, question_hidden, x1_mask)
    return start_scores, end_scores
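
# --- Usage sketch (illustrative; not part of the original file) ---
# A minimal smoke test of forward() under stated assumptions: `model` is an
# already-constructed reader module, the vocabulary size (100) and the
# batch/length sizes are arbitrary, masks are nonzero at padding positions,
# and the mask dtype may need to be uint8 on older PyTorch versions.
#
#     import torch
#     batch, len_d, len_q = 2, 30, 8
#     x1 = torch.randint(0, 100, (batch, len_d))
#     x1_f = torch.zeros(batch, len_d, model.args.num_features)
#     x1_mask = torch.zeros(batch, len_d, dtype=torch.bool)
#     x2 = torch.randint(0, 100, (batch, len_q))
#     x2_mask = torch.zeros(batch, len_q, dtype=torch.bool)
#     start_scores, end_scores = model(x1, x1_f, x1_mask, x2, x2_mask)
#     # both outputs have shape [batch * len_d] (span-boundary scores)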