# GP.__init__ (excerpt)
#
# in uimnet/algorithms/due.py [0:0]
#

    def __init__(self,
                 num_outputs,
                 some_features,
                 kernel="RBF",
                 num_inducing_points=20):
        """Build an independent multitask variational GP head.

        The inducing points are seeded from `some_features` via
        `self.cluster_`, and the kernel lengthscale is initialised from half
        the mean pairwise distance of those features.

        Args:
            num_outputs: number of independent GP tasks (output dimensions).
            some_features: feature tensor used to seed inducing points and
                the initial lengthscale.
            kernel: one of "RBF", "Matern12", "Matern32", "Matern52", "RQ".
            num_inducing_points: number of inducing points per task.

        Raises:
            ValueError: if `kernel` names an unknown kernel.
        """
        task_shape = torch.Size([num_outputs])

        # Seed the variational approximation from the data itself.
        inducing_points = self.cluster_(some_features, num_inducing_points)
        lengthscale_init = torch.pdist(some_features).mean() / 2

        variational_distribution = \
            gpytorch.variational.CholeskyVariationalDistribution(
                num_inducing_points, batch_shape=task_shape)

        # Note: `self` is handed to the strategy before super().__init__ runs;
        # this is the standard gpytorch ApproximateGP construction order.
        base_strategy = gpytorch.variational.VariationalStrategy(
            self, inducing_points, variational_distribution)
        variational_strategy = \
            gpytorch.variational.IndependentMultitaskVariationalStrategy(
                base_strategy, num_tasks=num_outputs)

        super(GP, self).__init__(variational_strategy)

        kwargs = {
            # These two options gave worse results
            # "ard_num_dims": int(some_features.size(1)),
            # "batch_shape": task_shape
        }

        # Lazy dispatch table: only the selected kernel is constructed.
        kernel_builders = {
            "RBF": lambda: gpytorch.kernels.RBFKernel(**kwargs),
            "Matern12": lambda: gpytorch.kernels.MaternKernel(nu=1 / 2, **kwargs),
            "Matern32": lambda: gpytorch.kernels.MaternKernel(nu=3 / 2, **kwargs),
            "Matern52": lambda: gpytorch.kernels.MaternKernel(nu=5 / 2, **kwargs),
            "RQ": lambda: gpytorch.kernels.RQKernel(**kwargs),
        }
        if kernel not in kernel_builders:
            raise ValueError("Specified kernel not known.")
        base_kernel = kernel_builders[kernel]()

        # Broadcast the data-driven initial value over the kernel's
        # lengthscale tensor, whatever its shape.
        base_kernel.lengthscale = lengthscale_init * torch.ones_like(
            base_kernel.lengthscale)

        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(base_kernel)