def gauss_kernel()

in orbit/utils/kernels.py


import numpy as np


def gauss_kernel(x, x_i, rho=0.1, alpha=1.0, n_reduce=-1, point_to_flatten=1):
    """
    Parameters
    ----------
    x : array-like
        entry points at which the kernel weights are evaluated
    x_i : array-like
        reference point locations used to compute the distance to each entry point
    rho : float
        smoothing parameter, known as the "length-scale" in Gaussian processes
    alpha : float
        marginal standard deviation parameter in Gaussian processes; use 1 for kernel regression
    n_reduce : int
        if greater than 0 (default is -1, i.e. disabled), keep only this many of the largest
        weights for each entry point and zero out the rest
    point_to_flatten : float
        time point at which the weights start to flatten (last weights carried forward);
        default is 1 for normalized time points

    Returns
    -------
    np.ndarray
        2D array of shape (N, M), where N is the number of entry points and
        M is the number of reference points; each entry holds the kernel weight
        between the corresponding entry point and reference point

    See Also
    --------
    1. https://mc-stan.org/docs/2_24/stan-users-guide/gaussian-process-regression.html
    2. https://en.wikipedia.org/wiki/Local_regression
    """
    N = len(x)
    M = len(x_i)
    k = np.zeros((N, M), np.double)
    alpha_sq = alpha**2
    rho_sq_t2 = 2 * rho**2
    for n in range(N):
        if x[n] <= point_to_flatten:
            k[n, :] = alpha_sq * np.exp(-1 * (x[n] - x_i) ** 2 / rho_sq_t2)
        else:
            # last weights carried forward for future time points
            k[n, :] = alpha_sq * np.exp(-1 * (point_to_flatten - x_i) ** 2 / rho_sq_t2)

    if n_reduce > 0:
        # keep only the n_reduce largest weights for each entry point;
        # reduce_by_max is defined elsewhere in this module
        k = np.apply_along_axis(reduce_by_max, axis=1, arr=k, n=n_reduce)

    # normalize each row so that the weights sum to one
    k = k / np.sum(k, axis=1, keepdims=True)

    return k
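

For reference, reduce_by_max is defined elsewhere in orbit/utils/kernels.py and is not shown here. A minimal sketch of its assumed behavior, based only on the n_reduce description above (keep the n largest weights of a row and zero out the rest), could look like:

def reduce_by_max(x, n):
    # sketch of the assumed behavior, not the actual orbit implementation:
    # keep the n largest entries of a 1-D array and zero out the rest
    out = np.zeros_like(x)
    idx = np.argsort(x)[-n:]  # indices of the n largest weights
    out[idx] = x[idx]
    return out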
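
As a usage sketch (the time grid and response values below are illustrative, not from the orbit source): because each row of the returned matrix sums to one, a kernel-regression estimate at the entry points is simply a weighted average of the reference responses.

import numpy as np

# reference time points on a normalized [0, 1] scale and their responses
x_ref = np.linspace(0, 1, 10)
y_ref = np.sin(2 * np.pi * x_ref)

# entry points at which smoothed estimates are wanted; values beyond
# point_to_flatten=1 reuse the weights computed at 1 (carried forward)
x_new = np.linspace(0, 1.2, 25)

# N x M weight matrix with rows normalized to sum to one
w = gauss_kernel(x_new, x_ref, rho=0.2)
assert w.shape == (len(x_new), len(x_ref))

# Nadaraya-Watson style estimate: weighted average of the reference responses
y_hat = w @ y_ref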