# Excerpt: `normI` method from causalml/inference/tree/uplift.pyx

    def normI(self, n_c: cython.int, n_c_left: cython.int, n_t: list, n_t_left: list, alpha: cython.float = 0.9, currentDivergence: cython.float = 0.0) -> cython.float:
        '''
        Normalization factor for the split criterion.

        Penalizes splits that produce an unbalanced treatment/control
        assignment in the child nodes.

        Args
        ----
        n_c : int
            Sample count of the control group in the current node.

        n_c_left : int
            Sample count of the control group in the left child node.

        n_t : list of int
            Per-treatment-group sample counts in the current node.

        n_t_left : list of int
            Per-treatment-group sample counts in the left child node
            (parallel to ``n_t``).

        alpha : float
            The weight used to balance different normalization parts.

        currentDivergence : float
            Divergence measure of the current node; only used when the
            evaluation function is IDDP.

        Returns
        -------
        norm_res : float
            Normalization factor.
        '''

        norm_res: cython.float = 0.
        pt_a: cython.float
        pc_a: cython.float

        # Hoist loop-invariant totals (the original recomputed np.sum(n_t)
        # in every term). The +0.1 smoothing guards against empty groups.
        sum_n_t: cython.float = np.sum(n_t)
        sum_n_t_left: cython.float = np.sum(n_t_left)

        pt_a = 1. * sum_n_t_left / (sum_n_t + 0.1)
        pc_a = 1. * n_c_left / (n_c + 0.1)

        if self.evaluationFunction == self.evaluate_IDDP:
            # Normalization Part 1: treatment/control imbalance entropy,
            # weighted by the current node's divergence.
            norm_res += (entropyH(1. * sum_n_t / (sum_n_t + n_c), 1. * n_c / (sum_n_t + n_c)) * currentDivergence)
            norm_res += (1. * sum_n_t / (sum_n_t + n_c) * entropyH(pt_a))

        else:
            # Normalization Part 1
            norm_res += (alpha * entropyH(1. * sum_n_t / (sum_n_t + n_c), 1. * n_c / (sum_n_t + n_c)) * kl_divergence(pt_a, pc_a))
            # Normalization Part 2 & 3: per-treatment-group contributions.
            for i in range(len(n_t)):
                pt_a_i = 1. * n_t_left[i] / (n_t[i] + 0.1)
                norm_res += ((1 - alpha) * entropyH(1. * n_t[i] / (n_t[i] + n_c), 1. * n_c / (n_t[i] + n_c)) * kl_divergence(1. * pt_a_i, pc_a))
                norm_res += (1. * n_t[i] / (sum_n_t + n_c) * entropyH(pt_a_i))
        # Normalization Part 4
        norm_res += 1. * n_c / (sum_n_t + n_c) * entropyH(pc_a)

        # Normalization Part 5: constant offset keeps the factor bounded
        # away from zero (it is used as a divisor by callers — TODO confirm).
        norm_res += 0.5
        return norm_res