def _kims_cnn()

in recommenders/models/deeprec/models/dkn.py


    def _kims_cnn(self, word, entity, hparams):
        """The KCNN module. KCNN is an extension of traditional CNN that incorporates symbolic knowledge from
        a knowledge graph into sentence representation learning.
        Args:
            word (object): word indices for the sentence.
            entity (object): entity indices for the sentence. Entities are aligned with words in the sentence.
            hparams (object): global hyper-parameters.

        Returns:
            object: Sentence representation.
        """
        # Kim's CNN hyper-parameters
        filter_sizes = hparams.filter_sizes
        num_filters = hparams.num_filters

        dim = hparams.dim
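        # Look up word embeddings and, when enabled, entity/context embeddings
        # aligned with the same word positions; concatenate them along the
        # embedding axis.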
        embedded_chars = tf.nn.embedding_lookup(params=self.embedding, ids=word)
        if hparams.use_entity and hparams.use_context:
            entity_embedded_chars = tf.nn.embedding_lookup(
                params=self.entity_embedding, ids=entity
            )
            context_embedded_chars = tf.nn.embedding_lookup(
                params=self.context_embedding, ids=entity
            )
            concat = tf.concat(
                [embedded_chars, entity_embedded_chars, context_embedded_chars], axis=-1
            )
        elif hparams.use_entity:
            entity_embedded_chars = tf.nn.embedding_lookup(
                params=self.entity_embedding, ids=entity
            )
            concat = tf.concat([embedded_chars, entity_embedded_chars], axis=-1)
        else:
            concat = embedded_chars
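        # Add a singleton channel dimension so the concatenated embeddings have
        # shape (batch, doc_size, embedding_width, 1) as expected by conv2d.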
        concat_expanded = tf.expand_dims(concat, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.compat.v1.variable_scope(
                "conv-maxpool-%s" % filter_size, initializer=self.initializer
            ):
                # Convolution Layer
                if hparams.use_entity and hparams.use_context:
                    filter_shape = [filter_size, dim * 3, 1, num_filters]
                elif hparams.use_entity:
                    filter_shape = [filter_size, dim * 2, 1, num_filters]
                else:
                    filter_shape = [filter_size, dim, 1, num_filters]
                W = tf.compat.v1.get_variable(
                    name="W" + "_filter_size_" + str(filter_size),
                    shape=filter_shape,
                    dtype=tf.float32,
                    # Glorot-style (fan_avg) scaling with a truncated normal distribution.
                    initializer=tf.compat.v1.keras.initializers.VarianceScaling(
                        scale=1.0,
                        mode="fan_avg",
                        distribution="truncated_normal",
                    ),
                )
                b = tf.compat.v1.get_variable(
                    name="b" + "_filter_size_" + str(filter_size),
                    shape=[num_filters],
                    dtype=tf.float32,
                )
                # Track the convolution weights and biases with the model's
                # other layer parameters.
                if W not in self.layer_params:
                    self.layer_params.append(W)
                if b not in self.layer_params:
                    self.layer_params.append(b)
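                # Convolve over word positions; with VALID padding this yields
                # doc_size - filter_size + 1 output positions per filter.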
                conv = tf.nn.conv2d(
                    input=concat_expanded,
                    filters=W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv",
                )
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool2d(
                    h,
                    ksize=[1, hparams.doc_size - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="pool",
                )
                pooled_outputs.append(pooled)
        # Combine all the pooled features
        # self.num_filters_total is the output dimension of Kim's CNN
        self.num_filters_total = num_filters * len(filter_sizes)
        h_pool = tf.concat(pooled_outputs, axis=-1)
        h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
        return h_pool_flat
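
For orientation, below is a minimal standalone sketch of the convolution and max-pooling shape arithmetic performed above. It is not code from the repository; the sizes (batch=4, doc_size=10, dim=32, num_filters=100, filter_sizes=[1, 2, 3]) are made up, and the random tensors stand in for the learned embeddings and filters.

import tensorflow as tf

# Hypothetical sizes, chosen only for illustration.
batch, doc_size, dim, num_filters = 4, 10, 32, 100
filter_sizes = [1, 2, 3]

# Stand-in for the word/entity/context embeddings concatenated along the
# embedding axis and expanded with a singleton channel dimension.
concat_expanded = tf.random.normal([batch, doc_size, dim * 3, 1])

pooled_outputs = []
for filter_size in filter_sizes:
    w = tf.random.normal([filter_size, dim * 3, 1, num_filters])
    conv = tf.nn.conv2d(concat_expanded, w, strides=[1, 1, 1, 1], padding="VALID")
    # conv has shape (batch, doc_size - filter_size + 1, 1, num_filters).
    pooled = tf.nn.max_pool2d(
        tf.nn.relu(conv),
        ksize=[1, doc_size - filter_size + 1, 1, 1],
        strides=[1, 1, 1, 1],
        padding="VALID",
    )
    # pooled has shape (batch, 1, 1, num_filters).
    pooled_outputs.append(pooled)

# Concatenating the pooled maps and flattening gives the sentence representation
# of width num_filters * len(filter_sizes).
h_pool_flat = tf.reshape(
    tf.concat(pooled_outputs, axis=-1), [-1, num_filters * len(filter_sizes)]
)
print(h_pool_flat.shape)  # (4, 300)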