in models_mnist/executor.py
def _build_loom_ops(self):
"""TODO(satwik): Some helper text here
"""
params = self.params
types = self._loom_types
# create all modules under the same scope
op_params = {'map_dim': params['map_size']}
with tf.variable_scope('loom_modules') as module_scope:
op_params['module_scope'] = module_scope
# creating ops
loom_ops_dict = {}
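# arithmetic ops on float types: add, divide, exp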
in_types = [types['float'], types['float']]
out_types = [types['float']]
loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)
in_types = [types['float']]
loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)
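# combine two attention maps elementwise (add / max)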
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
tf.maximum)
# basic attention manipulation ops
in_types = [types['attention'], types['float']]
out_types = [types['attention']]
loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
out_types)
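# align two text feature slices (with their round indices) into a float output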
in_types = [types['text_feat_slice'], types['text_feat_slice'],
types['round'], types['round']]
out_types = [types['float']]
op_params['amalgam_text_feats'] = params['amalgam_text_feats']
op_params['text_embed_size'] = params['text_embed_size']
loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types, op_params)
# slicing ops
in_types = [types['text'], types['round'], types['time']]
out_types = [types['text_slice']]
loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)
in_types = [types['text_feat'], types['round'], types['time']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)
# slice the fact (answer) embedding for a given round
in_types = [types['fact'], types['round']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)
# normalize and complement
in_types = [types['attention']]
out_types = [types['attention']]
loom_ops_dict['normalize_exclude'] = lm.NormalizeExcludeLoomOp(in_types,
out_types)
#------------------------------------------------------------------
# find module
in_types = [types['image'], types['text_slice']]
out_types = [types['attention']]
loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)
# and module
in_types = [types['attention'], types['attention']]
loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
# diff module
loom_ops_dict['diff_op'] = lm.DiffLoomOp(in_types, out_types, op_params)
# transform module
in_types = [types['attention'], types['image'], types['text_slice']]
loom_ops_dict['transform'] = lm.TransformLoomOp(in_types, out_types, op_params)
# describe module
out_types = [types['context']]
op_params['encode_size'] = params['lstm_size']
loom_ops_dict['describe'] = lm.DescribeLoomOp(in_types, out_types, op_params)
# exist module
loom_ops_dict['exist'] = lm.ExistLoomOp(in_types, out_types, op_params)
# count module
loom_ops_dict['count'] = lm.CountLoomOp(in_types, out_types, op_params)
# invalid module
in_types = [types['image']]
loom_ops_dict['invalid'] = lm.InvalidLoomOp(in_types, out_types, op_params)
return loom_ops_dict
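
For reference, the dictionary built above is normally handed to TensorFlow Fold's low-level Loom API, which compiles these modules once and then batches dynamically assembled module-network programs against them. The sketch below is illustrative only: the import alias, the `max_depth` value, and the `self._loom` attribute are assumptions, not taken from this file.

import tensorflow_fold.public.loom as loom

# hypothetical wiring inside the executor's setup (not from this repository)
loom_ops_dict = self._build_loom_ops()
# max_depth bounds the deepest module program the loom will schedule (value assumed)
self._loom = loom.Loom(named_ops=loom_ops_dict, max_depth=20)
# individual programs are then woven against self._loom (e.g. via
# self._loom.make_weaver()) and fed to the TensorFlow session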