def _build_fast_CIN()

in recommenders/models/deeprec/models/xDeepFM.py


    def _build_fast_CIN(self, nn_input, res=False, direct=False, bias=False):
        """Construct the compressed interaction network with reduced parameters.
        This component provides explicit and vector-wise higher-order feature interactions.
        Parameters from the filters are reduced via a matrix decomposition method.
        Fast CIN is more space and time efficient than CIN.

        Args:
            nn_input (object): The output of the field-embedding layer; this is the input to CIN.
            res (bool): Whether to use a residual structure to fuse the results from each layer of CIN.
            direct (bool): If True, all hidden units are connected to both the next layer and the output layer;
                    otherwise, half of the hidden units are connected to the next layer and the other half to the output layer.
            bias (bool): Whether to add a bias term when calculating the feature maps.

        Returns:
            object: Prediction score made by fast CIN.
        """
        hparams = self.hparams
        hidden_nn_layers = []
        field_nums = []
        final_len = 0
        field_num = hparams.FIELD_COUNT
        fast_CIN_d = hparams.fast_CIN_d
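        # Shape notation used below: B = batch size, F = field count,
        # D = embedding dimension, H = layer_size (number of feature maps),
        # d = fast_CIN_d (rank of the filter decomposition).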
        nn_input = tf.reshape(
            nn_input, shape=[-1, int(field_num), hparams.dim]
        )  # (B,F,D)
        nn_input = tf.transpose(a=nn_input, perm=[0, 2, 1])  # (B,D,F)
        field_nums.append(int(field_num))
        hidden_nn_layers.append(nn_input)
        final_result = []
        with tf.compat.v1.variable_scope(
            "exfm_part", initializer=self.initializer
        ) as scope:  # noqa: F841
            for idx, layer_size in enumerate(hparams.cross_layer_sizes):
                if idx == 0:
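                    # First layer: interactions of the input embeddings with
                    # themselves, so a single filter fast_w serves both sides
                    # of the product (a symmetric rank-d decomposition).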
                    fast_w = tf.compat.v1.get_variable(
                        "fast_CIN_w_" + str(idx),
                        shape=[1, field_nums[0], fast_CIN_d * layer_size],
                        dtype=tf.float32,
                    )

                    self.cross_params.append(fast_w)
                    dot_result_1 = tf.nn.conv1d(
                        input=nn_input, filters=fast_w, stride=1, padding="VALID"
                    )  # shape: (B,D,d*H)
                    dot_result_2 = tf.nn.conv1d(
                        input=tf.pow(nn_input, 2),
                        filters=tf.pow(fast_w, 2),
                        stride=1,
                        padding="VALID",
                    )  # shape: (B,D,d*H)
                    # FM-style identity: sum_{i<j} w_i w_j x_i x_j
                    #   = 0.5 * ((sum_i w_i x_i)^2 - sum_i w_i^2 x_i^2),
                    # applied per (feature map, rank) channel.
                    dot_result = tf.reshape(
                        0.5 * (tf.pow(dot_result_1, 2) - dot_result_2),
                        shape=[-1, hparams.dim, layer_size, fast_CIN_d],
                    )
                    curr_out = tf.reduce_sum(
                        input_tensor=dot_result, axis=3, keepdims=False
                    )  # shape: (B,D,H)
                else:
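                    # Deeper layers: the interaction of the raw input with the
                    # previous hidden layer is decomposed into two rank-d
                    # filters, fast_w (applied to nn_input) and fast_v (applied
                    # to hidden_nn_layers[-1]); their elementwise product,
                    # summed over the rank axis, approximates the full CIN filter.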
                    fast_w = tf.compat.v1.get_variable(
                        "fast_CIN_w_" + str(idx),
                        shape=[1, field_nums[0], fast_CIN_d * layer_size],
                        dtype=tf.float32,
                    )
                    fast_v = tf.compat.v1.get_variable(
                        "fast_CIN_v_" + str(idx),
                        shape=[1, field_nums[-1], fast_CIN_d * layer_size],
                        dtype=tf.float32,
                    )

                    self.cross_params.append(fast_w)
                    self.cross_params.append(fast_v)

                    dot_result_1 = tf.nn.conv1d(
                        input=nn_input, filters=fast_w, stride=1, padding="VALID"
                    )  # shape: (B,D,d*H)
                    dot_result_2 = tf.nn.conv1d(
                        input=hidden_nn_layers[-1],
                        filters=fast_v,
                        stride=1,
                        padding="VALID",
                    )  # shape: (B,D,d*H)
                    dot_result = tf.reshape(
                        tf.multiply(dot_result_1, dot_result_2),
                        shape=[-1, hparams.dim, layer_size, fast_CIN_d],
                    )
                    curr_out = tf.reduce_sum(
                        input_tensor=dot_result, axis=3, keepdims=False
                    )  # shape: (B,D,H)

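                # Optional bias, one scalar per feature map, broadcast over the
                # batch and embedding dimensions.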
                if bias:
                    # tf.nn.bias_add expects a 1-D bias matching the channel
                    # (feature-map) dimension.
                    b = tf.compat.v1.get_variable(
                        name="f_b" + str(idx),
                        shape=[layer_size],
                        dtype=tf.float32,
                        initializer=tf.compat.v1.zeros_initializer(),
                    )
                    curr_out = tf.nn.bias_add(curr_out, b)
                    self.cross_params.append(b)

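                # Optional batch normalization over the feature maps.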
                if hparams.enable_BN:
                    curr_out = tf.compat.v1.layers.batch_normalization(
                        curr_out,
                        momentum=0.95,
                        epsilon=0.0001,
                        training=self.is_train_stage,
                    )

                curr_out = self._activate(curr_out, hparams.cross_activation)

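                # Route the feature maps: with direct=True every map feeds both
                # the output and the next layer; otherwise the maps are split
                # half to the next layer and half to the output (except for the
                # last layer, which is routed entirely to the output).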
                if direct:
                    direct_connect = curr_out
                    next_hidden = curr_out
                    final_len += layer_size
                    field_nums.append(int(layer_size))

                else:
                    if idx != len(hparams.cross_layer_sizes) - 1:
                        next_hidden, direct_connect = tf.split(
                            curr_out, 2 * [int(layer_size / 2)], 2
                        )
                        final_len += int(layer_size / 2)
                        field_nums.append(int(layer_size / 2))
                    else:
                        direct_connect = curr_out
                        next_hidden = 0
                        final_len += layer_size
                        field_nums.append(int(layer_size))

                final_result.append(direct_connect)
                hidden_nn_layers.append(next_hidden)

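            # Concatenate the direct connections from all layers along the
            # feature-map axis and sum-pool over the embedding dimension.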
            result = tf.concat(final_result, axis=2)
            result = tf.reduce_sum(input_tensor=result, axis=1, keepdims=False)  # (B,H)

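            # Optional residual term: a scalar summary of the pooled feature
            # maps, added to the final linear score.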
            if res:
                base_score = tf.reduce_sum(
                    input_tensor=result, axis=1, keepdims=True
                )  # (B,1)
            else:
                base_score = 0

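            # Final linear layer mapping the pooled feature maps to a scalar
            # prediction score.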
            w_nn_output = tf.compat.v1.get_variable(
                name="w_nn_output", shape=[final_len, 1], dtype=tf.float32
            )
            b_nn_output = tf.compat.v1.get_variable(
                name="b_nn_output",
                shape=[1],
                dtype=tf.float32,
                initializer=tf.compat.v1.zeros_initializer(),
            )
            self.layer_params.append(w_nn_output)
            self.layer_params.append(b_nn_output)
            exFM_out = (
                tf.compat.v1.nn.xw_plus_b(result, w_nn_output, b_nn_output) + base_score
            )

        return exFM_out
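
A minimal sanity check (not part of the model) of the FM-style identity the first
fast-CIN layer relies on: for one feature map and one rank component, the
half-difference between the squared linear response and the response to squared
inputs equals the sum over unordered field pairs of weighted interaction
products. The snippet assumes only NumPy; all names are illustrative.

    import numpy as np

    rng = np.random.default_rng(0)
    F = 5                    # number of fields (illustrative)
    x = rng.normal(size=F)   # one embedding coordinate across all fields
    w = rng.normal(size=F)   # one (feature map, rank) column of fast_w

    # Explicit sum over unordered field pairs i < j.
    pairwise = sum(
        w[i] * w[j] * x[i] * x[j] for i in range(F) for j in range(i + 1, F)
    )

    # Fast-CIN form: 0.5 * ((sum_i w_i x_i)**2 - sum_i w_i**2 * x_i**2).
    fast = 0.5 * ((w @ x) ** 2 - (w**2) @ (x**2))
    assert np.isclose(pairwise, fast)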