def plot_prior_samps_2d()

in pubs/owenetal/code/prior_plots.py [0:0]
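The snippet below is not part of the listed function; it is a minimal sketch of the module-level setup the function appears to rely on. The exact aepsych import paths and the value of global_seed are assumptions, not copied from prior_plots.py.

# Assumed setup for running plot_prior_samps_2d() standalone.
# Module paths and the seed value are guesses, not taken from the file.
import gpytorch
import matplotlib.pyplot as plt
import numpy as np
import torch
from botorch.utils.sampling import draw_sobol_samples
from scipy.stats import norm

from aepsych.config import Config
from aepsych.factory import (
    default_mean_covar_factory,
    monotonic_mean_covar_factory,
    song_mean_covar_factory,
)
from aepsych.models import GPClassificationModel, MonotonicRejectionGP
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from aepsych.utils import _dim_grid

global_seed = 100  # assumed value; the file defines its own module-level seed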


def plot_prior_samps_2d():
    """Plot prior samples from the linear, RBF, and monotonic RBF kernel models over a 2d grid."""
    # Shared configuration consumed by all three mean/covariance factories
    config = Config(
        config_dict={
            "common": {
                "outcome_type": "single_probit",
                "target": 0.75,
                "lb": "[-3, -3]",
                "ub": "[3, 3]",
            },
            "default_mean_covar_factory": {},
            "song_mean_covar_factory": {},
            "monotonic_mean_covar_factory": {"monotonic_idxs": "[1]"},
        }
    )
    lb = torch.Tensor([-3, -3])
    ub = torch.Tensor([3, 3])
    nsamps = 5
    gridsize = 30
    # Dense evaluation grid of gridsize**2 points over [lb, ub]
    grid = _dim_grid(lower=lb, upper=ub, dim=2, gridsize=gridsize)
    np.random.seed(global_seed)
    torch.random.manual_seed(global_seed)
    # Evaluate the models in prior mode so the samples below come from the priors
    with gpytorch.settings.prior_mode(True):
        rbf_mean, rbf_covar = default_mean_covar_factory(config)
        rbf_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=rbf_mean,
            covar_module=rbf_covar,
        )
        # register placeholder training data; only prior samples are drawn below
        rbf_model.set_train_data(
            torch.Tensor([-3, -3])[:, None], torch.LongTensor([0])
        )
        rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))

        song_mean, song_covar = song_mean_covar_factory(config)
        song_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=song_mean,
            covar_module=song_covar,
        )
        # same placeholder training data as above
        song_model.set_train_data(
            torch.Tensor([-3, -3])[:, None], torch.LongTensor([0])
        )

        song_samps = song_model(grid).sample(torch.Size([nsamps]))

        mono_mean, mono_covar = monotonic_mean_covar_factory(config)
        mono_model = MonotonicRejectionGP(
            likelihood="probit-bernoulli",
            monotonic_idxs=[1],
            mean_module=mono_mean,
            covar_module=mono_covar,
            num_induc=1000,
        )

        # Stacked [lb; ub] bounds used to draw Sobol inducing points
        bounds_ = torch.tensor([-3.0, -3.0, 3.0, 3.0]).reshape(2, -1)
        # Select inducing points
        mono_model.inducing_points = draw_sobol_samples(
            bounds=bounds_, n=mono_model.num_induc, q=1
        ).squeeze(1)

        # Append a derivative index of 0 to mark these as function-value inputs
        inducing_points_aug = mono_model._augment_with_deriv_index(
            mono_model.inducing_points, 0
        )
        scales = ub - lb
        # Dummy training point; the model is only used for prior sampling here
        dummy_train_x = mono_model._augment_with_deriv_index(
            torch.Tensor([-3, 3])[None, :], 0
        )
        mono_model.model = MixedDerivativeVariationalGP(
            train_x=dummy_train_x,
            train_y=torch.LongTensor([0]),
            inducing_points=inducing_points_aug,
            scales=scales,
            fixed_prior_mean=torch.Tensor([0.75]),
            covar_module=mono_covar,
            mean_module=mono_mean,
        )
        mono_samps = mono_model.sample(grid, nsamps)

    intensity_grid = np.linspace(-3, 3, gridsize)
    fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
    fig.tight_layout(rect=[0, 0.03, 1, 0.9])
    fig.suptitle("Prior samples")

    # Each panel: push latent samples through the normal CDF and plot them along the
    # intensity dimension at a subset of values of the other (context) dimension
    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in song_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[0].plot(intensity_grid, plotsamps, "b")
    ax[0].set_title("Linear kernel model")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in rbf_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[1].plot(intensity_grid, plotsamps, "b")
    ax[1].set_title("Nonmonotonic RBF kernel model")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in mono_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[2].plot(intensity_grid, plotsamps, "b")
    ax[2].set_title("Monotonic RBF kernel model")

    return fig
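
A minimal usage sketch, assuming the setup shown above the function; the output filename is arbitrary:

if __name__ == "__main__":
    fig = plot_prior_samps_2d()
    fig.savefig("prior_samps_2d.pdf")  # arbitrary output path
    plt.show()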