Models/exprsynth/nagdecoder.py
def __make_test_model(self):
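"""Build the test-time decoder graph: single-sample variants of the
production/variable/literal choice models, plus one-step message propagation."""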
# Almost everywhere we need to add an extra (batch) dimension: in training we
# handle several samples per batch, whereas at test time we decode a single sample:
context_token_representations = self.placeholders.get('context_token_representations')
context_token_masks = self.placeholders.get('context_token_mask')
if context_token_representations is not None:
context_token_representations = tf.expand_dims(context_token_representations, axis=0)
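# Presumed shape change: [num_ctx_tokens, repr_dim] -> [1, num_ctx_tokens, repr_dim].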
# === Grammar productions:
if self.hyperparameters['eg_use_vars_for_production_choice']:
pooled_variable_representations_at_prod_choice = \
tf.reduce_mean(self.placeholders['eg_production_var_representations'], axis=0)
pooled_variable_representations_at_prod_choice = \
tf.expand_dims(pooled_variable_representations_at_prod_choice, axis=0)
else:
pooled_variable_representations_at_prod_choice = None
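# Compute logits over grammar productions for the current node; with a
# single sample, every production maps to context id 0: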
eg_production_choice_logits = \
self.__make_production_choice_logits_model(
tf.expand_dims(self.placeholders['eg_production_node_representation'], axis=0),
pooled_variable_representations_at_prod_choice,
context_token_representations,
context_token_masks,
production_to_context_id=tf.constant([0], dtype=tf.int32))
self.ops['eg_production_choice_probs'] = tf.nn.softmax(eg_production_choice_logits)[0]
# === Variable productions
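# Score each candidate variable against the node at which a variable is chosen: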
eg_varproduction_choice_logits = \
self.__make_variable_choice_logits_model(
tf.expand_dims(self.placeholders['eg_varproduction_node_representation'], axis=0),
tf.expand_dims(self.placeholders['eg_varproduction_options_representations'], axis=0),
)
eg_varproduction_choice_logits = tf.squeeze(eg_varproduction_choice_logits, axis=0)
self.ops['eg_varproduction_choice_probs'] = tf.nn.softmax(eg_varproduction_choice_logits, axis=-1)
# === Literal productions
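# Build one choice distribution per literal kind. The optional normalizer
# (presumably) merges logits of duplicate candidate literals: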
self.ops['eg_litproduction_choice_probs'] = {}
for literal_kind in LITERAL_NONTERMINALS:
literal_logits = \
self.__make_literal_choice_logits_model(
literal_kind,
tf.expand_dims(self.placeholders['eg_litproduction_node_representation'], axis=0),
context_token_representations,
context_token_masks,
tf.constant([0], dtype=tf.int32),
self.placeholders.get('eg_litproduction_choice_normalizer'),
)
self.ops['eg_litproduction_choice_probs'][literal_kind] = \
tf.nn.softmax(literal_logits, axis=-1)[0]
# Expose one-step message propagation in expansion graph:
eg_hypers = {name.replace("eg_", "", 1): value
for (name, value) in self.hyperparameters.items()
if name.startswith("eg_")}
eg_hypers['propagation_rounds'] = 1
eg_hypers['num_labeled_edge_types'] = len(self.__expansion_labeled_edge_types)
eg_hypers['num_unlabeled_edge_types'] = len(self.__expansion_unlabeled_edge_types)
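# The driver can then run single propagation steps repeatedly as the
# expansion graph grows during decoding.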
with tf.variable_scope("ExpansionGraph"):
eg_model = AsyncGGNN(eg_hypers)
# First, embed edge labels. We only need the first step:
edge_labels = self.__embed_edge_labels(1)[0]
all_sending_representations = \
tf.concat(self.placeholders['eg_msg_source_representations'], axis=0)
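# Concatenation is (presumably) only needed to count the total number of
# messages; the per-edge-type representations themselves are passed to
# propagate_one_step below. All messages target the single receiving node,
# hence the all-zero target ids: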
msg_target_ids = tf.zeros([tf.shape(all_sending_representations)[0]], dtype=tf.int32)
receiving_node_num = tf.constant(1, dtype=tf.int32)
# Get node label embedding:
target_node_label_embeddings = \
tf.nn.embedding_lookup(
self.parameters['eg_token_embeddings'],
tf.expand_dims(self.placeholders['eg_msg_target_label_id'], axis=0),
) # Shape [1, eg_h_dim]
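# Enter the same variable scope as the first propagation round used in
# training, presumably so that the trained weights are reused here: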
with tf.variable_scope("async_ggnn/prop_round0"):
self.ops['eg_step_propagation_result'] = \
eg_model.propagate_one_step(self.placeholders['eg_msg_source_representations'],
edge_labels,
msg_target_ids,
receiving_node_num,
target_node_label_embeddings)[0]
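# The trailing [0] picks the updated representation of the single receiving node.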