models_mnist/executor.py [273:336]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
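    # stash the shared variable scope in op_params so the loom modules
    # below create their TF variables under 'loom_modules'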
    with tf.variable_scope('loom_modules') as module_scope:
      op_params['module_scope'] = module_scope

    # create the loom ops, keyed by module name
    loom_ops_dict = {}

    # elementwise arithmetic on float values
    in_types = [types['float'], types['float']]
    out_types = [types['float']]
    loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
    loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)

    in_types = [types['float']]
    loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)

    # combine two attention maps elementwise
    in_types = [types['attention'], types['attention']]
    out_types = [types['attention']]
    loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
    loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
                                                     tf.maximum)

    # basic attention manipulation ops
    in_types = [types['attention'], types['float']]
    out_types = [types['attention']]
    loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
                                                                 out_types)

    # align text feature slices taken from two rounds
    in_types = [types['text_feat_slice'], types['text_feat_slice'],
                types['round'], types['round']]
    out_types = [types['float']]
    op_params['amalgam_text_feats'] = params['amalgam_text_feats']
    op_params['text_embed_size'] = params['text_embed_size']
    loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types,
                                                     op_params)

    # slicing ops
    in_types = [types['text'], types['round'], types['time']]
    out_types = [types['text_slice']]
    loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)

    in_types = [types['text_feat'], types['round'], types['time']]
    out_types = [types['text_feat_slice']]
    loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)

    # slice the answer (fact) embedding for a given round
    in_types = [types['fact'], types['round']]
    out_types = [types['text_feat_slice']]
    loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)

    # normalize and complement
    in_types = [types['attention']]
    out_types = [types['attention']]
    loom_ops_dict['normalize_exclude'] = lm.NormalizeExcludeLoomOp(in_types,
                                                                   out_types)

    #------------------------------------------------------------------
    # find module
    in_types = [types['image'], types['text_slice']]
    out_types = [types['attention']]
    loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)

    # and module
    in_types = [types['attention'], types['attention']]
    out_types = [types['attention']]
    loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
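A minimal sketch of the kind of wrapper these calls assume: lm.BinaryLoomOp
presumably subclasses TensorFlow Fold's LoomOp, storing a two-argument TF
function and applying it to a batch of inputs. The definition below is
illustrative, not the repo's actual code.

  from tensorflow_fold.public import loom

  class BinaryLoomOp(loom.LoomOp):
    """Wraps a two-argument elementwise TF function (tf.add, tf.maximum, ...)."""

    def __init__(self, in_types, out_types, op):
      self._op = op
      super(BinaryLoomOp, self).__init__(in_types, out_types)

    def instantiate_batch(self, inputs):
      # inputs is a list of batched tensors, one per input TypeShape
      return [self._op(inputs[0], inputs[1])]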



models_vd/executor.py [269:332]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
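    # stash the shared variable scope in op_params so the loom modules
    # below create their TF variables under 'loom_modules'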
    with tf.variable_scope('loom_modules') as module_scope:
      op_params['module_scope'] = module_scope

    # create the loom ops, keyed by module name
    loom_ops_dict = {}

    # elementwise arithmetic on float values
    in_types = [types['float'], types['float']]
    out_types = [types['float']]
    loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
    loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)

    in_types = [types['float']]
    loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)

    # combine two attention maps elementwise
    in_types = [types['attention'], types['attention']]
    out_types = [types['attention']]
    loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
    loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
                                                     tf.maximum)

    # basic attention manipulation ops
    in_types = [types['attention'], types['float']]
    out_types = [types['attention']]
    loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
                                                                 out_types)

    # align text feature slices taken from two rounds
    in_types = [types['text_feat_slice'], types['text_feat_slice'],
                types['round'], types['round']]
    out_types = [types['float']]
    op_params['amalgam_text_feats'] = params['amalgam_text_feats']
    op_params['text_embed_size'] = params['text_embed_size']
    loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types,
                                                     op_params)

    # slicing ops
    in_types = [types['text'], types['round'], types['time']]
    out_types = [types['text_slice']]
    loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)

    in_types = [types['text_feat'], types['round'], types['time']]
    out_types = [types['text_feat_slice']]
    loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)

    # slice the answer (fact) embedding for a given round
    in_types = [types['fact'], types['round']]
    out_types = [types['text_feat_slice']]
    loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)

    # normalize and complement
    in_types = [types['attention']]
    out_types = [types['attention']]
    loom_ops_dict['normalize_exclude'] = lm.NormalizeExcludeLoomOp(in_types,
                                                                   out_types)

    #------------------------------------------------------------------
    # find module
    in_types = [types['image'], types['text_slice']]
    out_types = [types['attention']]
    loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)

    # and module
    in_types = [types['attention'], types['attention']]
    out_types = [types['attention']]
    loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
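For context, a hedged sketch of how a dict like loom_ops_dict is presumably
consumed: TensorFlow Fold's Loom takes the named ops, and each name becomes
a method on the weaver used to schedule calls. The scalar TypeShape and the
constants below are illustrative (reusing the BinaryLoomOp sketch above),
not values from this repo.

  import numpy as np
  import tensorflow as tf
  from tensorflow_fold.public import loom

  float_ts = loom.TypeShape('float32', ())  # stands in for types['float']
  ops = {'add': BinaryLoomOp([float_ts, float_ts], [float_ts], tf.add)}
  the_loom = loom.Loom(named_ops=ops)
  output_tensor = the_loom.output_tensor(float_ts)

  with tf.Session() as sess:
    weaver = the_loom.make_weaver()
    # weaver(...) wraps a constant; weaver.add(...) schedules the 'add' op
    three = weaver(np.array(3.0, dtype='float32'))
    four = weaver(np.array(4.0, dtype='float32'))
    total = weaver.add(three, four)
    print(sess.run(output_tensor, weaver.build_feed_dict([total])))  # 7.0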



