def _build_memory_encoder()

in models_vd/generator_attnet.py [0:0]


  def _build_memory_encoder(self, holders, scope='encoder', reuse=None):
    lstm_dim = self.lstm_dim
    num_layers = self.num_layers
    apply_dropout = self.encoder_dropout

    input_seq = holders['ques']
    input_seq_len = holders['ques_len']

    # history (facts/memories): flatten rounds into the batch dimension
    hist_size = holders['hist'].shape.as_list()
    hist_flat = tf.reshape(holders['hist'], [-1, hist_size[2]])
    hist_len_flat = tf.reshape(holders['hist_len'], [-1])

    with tf.variable_scope(scope, reuse=reuse):
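      # T: the static maximum sequence length (inputs are time-major
      # [T, N]); N: the dynamic batch size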
      T = input_seq.shape.as_list()[0]
      N = tf.shape(input_seq)[1]
      self.T_encoder = T
      self.N = N
      with tf.variable_scope(self.embed_scope, reuse=True):
        embed_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
                                                  self.encoder_embed_dim])
      # input_seq has shape [T, N] and embed_seq has shape [T, N, D].
      embed_seq = tf.nn.embedding_lookup(embed_mat, input_seq)
      self.embedded_input_seq = embed_seq

      # The RNN
      cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
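      # (_get_lstm_cell is presumably a helper defined elsewhere in this
      # module that stacks num_layers LSTM cells of size lstm_dim, with
      # dropout wrappers when apply_dropout is set)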

      # encoder_outputs has shape [T, N, lstm_dim]
      encoder_outputs, encoder_states = tf.nn.dynamic_rnn(
          cell, embed_seq, sequence_length=input_seq_len, dtype=tf.float32,
          time_major=True, scope='lstm')
      self.encoder_outputs = encoder_outputs

      # batch-major encoder outputs, shape [N, T, lstm_dim]
      batch_encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
      ques_enc = support.last_relevant(batch_encoder_outputs, input_seq_len)
      size = [-1, self.params['num_rounds'], self.params['lstm_size']]
      ques_enc = tf.reshape(ques_enc, size)
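      # ques_enc now groups the per-round question encodings by dialog:
      # [batch_size, num_rounds, lstm_size]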
      self.encoder_states = encoder_states

      # similarly encode history
      hist_out = tf.nn.embedding_lookup(embed_mat, hist_flat)
      # stacked RNNs to encode history; create a fresh cell per layer so
      # that each layer owns its own variables
      for ii in range(self.params['num_layers']):
        cell = tf.contrib.rnn.BasicLSTMCell(self.params['lstm_size'])
        hist_out, states = tf.nn.dynamic_rnn(
            cell, hist_out, sequence_length=hist_len_flat,
            dtype=tf.float32, scope='hist_layer_%d' % ii)
      # get output from last timestep
      hist_enc = support.last_relevant(hist_out, hist_len_flat)

      # reshape back
      size = [-1, hist_size[1], self.params['lstm_size']]
      hist_enc = tf.reshape(hist_enc, size)
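      # hist_enc: [batch_size, num_history_facts, lstm_size]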

      # dot-product attention: score each round's question against history
      num_r = self.params['num_rounds']
      attention = tf.matmul(ques_enc, hist_enc, transpose_b=True)

      # a very large negative number, used to suppress attention to
      # future rounds
      u_mat = np.full((num_r, num_r), -1e10)
      suppress_mat = tf.constant(np.triu(u_mat, 1), dtype=tf.float32)
      # lower-triangular binary mask: round i may only attend to rounds <= i
      l_mat = np.full((num_r, num_r), 1)
      mask_mat = tf.constant(np.tril(l_mat), dtype=tf.float32)
      attention = tf.nn.softmax(tf.multiply(attention, mask_mat)
                                + suppress_mat)
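      # e.g. for num_r = 3:
      #   mask_mat = [[1, 0, 0],     suppress_mat = [[0, -1e10, -1e10],
      #               [1, 1, 0],                     [0,     0, -1e10],
      #               [1, 1, 1]]                     [0,     0,     0]]
      # so after the softmax, round i puts (effectively) zero weight on
      # future rounds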
      self.att_history = attention
      att_hist_enc = tf.matmul(attention, hist_enc)

      # flatten out
      size = [-1, self.params['lstm_size']]
      att_hist_flat = tf.reshape(att_hist_enc, size)

      # concatenate attended history and encoder state for the last layer
      concat = tf.concat([encoder_states[-1].h, att_hist_flat], -1)
      new_state = LSTMStateTuple(encoder_states[-1].c,
                                 FC(concat, self.params['lstm_size']))

      # make it mutable
      encoder_states = list(encoder_states)
      encoder_states[-1] = new_state
      self.encoder_states = tuple(encoder_states)
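      # the decoder therefore starts from a state that fuses the question
      # encoding with the attended dialog history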

      # check if the word-vector flag is set
      if self.params['use_word_vectors']:
        # transform the word embeddings for further attention alignments;
        # embed_seq has shape [T, N, encoder_embed_dim]
        encoder_h_transformed = fc('encoder_h_transform',
            tf.reshape(embed_seq, [-1, self.encoder_embed_dim]),
            output_dim=lstm_dim)
      else:
        # transform the encoder outputs for further attention alignments;
        # encoder_outputs has shape [T, N, lstm_dim]
        encoder_h_transformed = fc('encoder_h_transform',
            tf.reshape(encoder_outputs, [-1, lstm_dim]),
            output_dim=lstm_dim)

      encoder_h_transformed = tf.reshape(encoder_h_transformed,
                                         to_T([T, N, lstm_dim]))
      self.encoder_h_transformed = encoder_h_transformed

      # seq_not_finished is a [T, N, 1] tensor, where seq_not_finished[t, n, 0]
      # is 1 iff sequence n is not finished at time t, and 0 otherwise
      seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
                                 input_seq_len[:, tf.newaxis])
      seq_not_finished = tf.cast(seq_not_finished, tf.float32)
      self.seq_not_finished = seq_not_finished
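
support.last_relevant (used twice above to pick out the encoding at the final
valid timestep of each variable-length sequence) is defined outside this file.
A minimal sketch of one common implementation, assuming batch-major outputs
[N, T, D] and lengths counted from 1, would be:

def last_relevant(outputs, seq_len):
  # outputs: [N, T, D] batch-major RNN outputs; seq_len: [N] valid lengths
  batch_size = tf.shape(outputs)[0]
  seq_len = tf.cast(seq_len, tf.int32)
  # gather the output at index (n, seq_len[n] - 1) for each sequence n
  indices = tf.stack([tf.range(batch_size), seq_len - 1], axis=1)
  return tf.gather_nd(outputs, indices)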