def build()

in product_matching/euclidean_intersection.py [0:0]

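The snippet omits the file's imports. A plausible set is sketched below, assuming TensorFlow's Keras API; `SelfAttention` is not a standard Keras layer, so it is presumably custom or third-party (the commented-out `SeqSelfAttention` line in the method suggests the keras-self-attention package was also tried):

    # Assumed imports, not shown in the source snippet.
    from tensorflow.keras.layers import (
        Add, Bidirectional, Dense, Dropout, Embedding,
        Input, Lambda, LSTM, Reshape, TimeDistributed,
    )
    from tensorflow.keras.models import Model
    import tensorflow.keras.backend as K  # used by the helper sketches below
    # SelfAttention: custom or third-party layer, import path unknown.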

    def build(self):
        """
        Build the model structure.

        The intersection model uses a joint-learning architecture: the
        query is embedded as boxes and self-intersected, the ASIN text
        is encoded with a BiLSTM plus self-attention, and the two
        branches are merged into a single output.
        """
        # Query branch: embed the tokens and project each timestep into
        # box space with a sigmoid so coordinates lie in [0, 1].
        query_input = Input(
            name='text_left',
            shape=self._params['input_shapes'][0],
        )
        asin_input = Input(
            name='text_right',
            shape=self._params['input_shapes'][1],
        )
        query = Embedding(
            self._params["vocab_size"],
            self._params["emb_dim"],
            trainable=True,
            mask_zero=True,
        )(query_input)
        box = TimeDistributed(
            Dense(self._params["box_dim"], activation='sigmoid')
        )(query)
        box_dropout = Dropout(self._params["dropout"])(box)
        # Self-intersect the query boxes (see the intersection_layer
        # sketch after the method) and restore the (num_boxes, box_dim)
        # layout for attention.
        intersection = Lambda(self.intersection_layer)([box_dropout, box_dropout])
        reshape_intersection = Reshape(
            (intersection.shape[1] // self._params["box_dim"], self._params["box_dim"])
        )(intersection)
        query_attention = SelfAttention(return_sequences=True)(reshape_intersection)
        query_dropout = Dropout(self._params["dropout"])(query_attention)
        query_boxes = Dense(self._params["box_dim"])(query_dropout)

        # ASIN branch: embed the tokens, encode them with a BiLSTM, and
        # apply self-attention before projecting into point space.
        asin = Embedding(
            self._params["vocab_size"],
            self._params["emb_dim"],
            trainable=True,
            mask_zero=True,
        )(asin_input)
        lstm = Bidirectional(
            LSTM(self._params["emb_dim"] // 2, return_sequences=True)
        )(asin)
        lstm_dropout = Dropout(self._params["dropout"])(lstm)
        # Alternative attention that was tried:
        # attention = SeqSelfAttention(attention_activation='tanh')(lstm)
        attention = SelfAttention(return_sequences=True)(lstm_dropout)
        attention_dropout = Dropout(self._params["dropout"])(attention)
        dense_attention = Dense(self._params["box_dim"] // 2)(attention_dropout)
        # Sum the per-timestep slices into one fixed-size ASIN embedding
        # (see the crop_box sketch after the method).
        asin_embedding = Add()([
            self.crop_box(i, None, None)(dense_attention)
            for i in range(dense_attention.shape[1])
        ])

        # Join the two branches (see the merge_layer sketch after the
        # method) and attach the task-specific output layer.
        merged_layer = Lambda(self.merge_layer)([query_boxes, asin_embedding])
        # Alternative output head that was tried:
        # final_loss = Dense(2, activation="softmax")(merged_layer)
        final_loss = self._make_output_layer()(merged_layer)
        self._backend = Model(inputs=[query_input, asin_input], outputs=final_loss)
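
The helpers `intersection_layer`, `merge_layer`, and `crop_box` are referenced but not shown in this snippet. The sketches below are illustrative guesses only, assuming each box vector stores its minimum corner in the first `box_dim // 2` entries and its maximum corner in the rest; the file's actual implementations may differ.

    def intersection_layer(self, tensors):
        # Hypothetical sketch: pairwise "hard" intersection of every box
        # in `a` with every box in `b`; build() passes the same tensor
        # twice, so this self-intersects the query boxes.
        a, b = tensors                                  # (batch, T, box_dim) each
        d = self._params["box_dim"] // 2
        a_min, a_max = a[..., :d], a[..., d:]
        b_min, b_max = b[..., :d], b[..., d:]
        # Broadcast to (batch, T, T, d): max of mins, min of maxes.
        inter_min = K.maximum(K.expand_dims(a_min, 2), K.expand_dims(b_min, 1))
        inter_max = K.minimum(K.expand_dims(a_max, 2), K.expand_dims(b_max, 1))
        boxes = K.concatenate([inter_min, inter_max], axis=-1)
        # Flatten so the Reshape in build() can recover (T*T, box_dim).
        t = K.int_shape(a)[1]
        return K.reshape(boxes, (-1, t * t * self._params["box_dim"]))

    def merge_layer(self, tensors):
        # Hypothetical sketch: Euclidean distance from the ASIN point to
        # each query box (zero inside a box), query2box-style scoring.
        query_boxes, asin_point = tensors
        d = self._params["box_dim"] // 2
        box_min, box_max = query_boxes[..., :d], query_boxes[..., d:]
        point = K.expand_dims(asin_point, axis=1)       # (batch, 1, d)
        outside = K.maximum(point - box_max, 0.0) + K.maximum(box_min - point, 0.0)
        return K.sqrt(K.sum(K.square(outside), axis=-1) + K.epsilon())

    def crop_box(self, index, start, end):
        # Hypothetical sketch: a layer selecting timestep `index`;
        # `start` and `end` are unused, mirroring the
        # crop_box(i, None, None) calls in build().
        return Lambda(lambda x: x[:, index, :])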
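
A minimal usage sketch, assuming the base-model convention implied by `self._params` and `self._make_output_layer()`; the class name, parameter values, and dict-style `_params` below are hypothetical, and any output-layer configuration is elided:

    model = EuclideanIntersection()       # hypothetical class name
    model._params = {                     # treated as a plain dict here
        "input_shapes": [(10,), (40,)],   # (query_len,), (asin_len,)
        "vocab_size": 30000,
        "emb_dim": 128,
        "box_dim": 64,
        "dropout": 0.2,
    }
    model.build()
    model._backend.summary()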