in botorch/acquisition/cached_cholesky.py [0:0]
def _get_f_X_samples(self, posterior: GPyTorchPosterior, q_in: int) -> Tensor:
r"""Get posterior samples at the `q_in` new points from the joint posterior.
Args:
posterior: The joint posterior is over (X_baseline, X).
q_in: The number of new points in the posterior. See `_set_sampler` for
more information.
Returns:
A `sample_shape x batch_shape x q x m`-dim tensor of posterior
samples at the new points.
"""
    # Technically, we should add a consistent nugget to the cached covariance
    # (and box decompositions) and to the new block, but recomputing the box
    # decompositions every time the jitter changes would be quite slow.
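    # Fast path: for single-task models with root caching enabled and a cached
    # Cholesky factor of the baseline covariance, draw samples via a low-rank
    # update of the cached factor instead of factorizing the full joint
    # covariance from scratch.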
if not self._is_mt and self._cache_root and hasattr(self, "_baseline_L"):
try:
return sample_cached_cholesky(
posterior=posterior,
baseline_L=self._baseline_L,
q=q_in,
base_samples=self.sampler.base_samples,
sample_shape=self.sampler.sample_shape,
)
except (NanError, NotPSDError):
warnings.warn(
"Low-rank cholesky updates failed due NaNs or due to an "
"ill-conditioned covariance matrix. "
"Falling back to standard sampling.",
BotorchWarning,
)
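    # Slow path: if the low-rank update is unavailable or failed, draw samples
    # from the full joint posterior and slice out the new points below.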
# TODO: improve efficiency for multi-task models
samples = self.sampler(posterior)
if isinstance(self.model, HigherOrderGP):
# Select the correct q-batch dimension for HOGP.
q_dim = -self.model._num_dimensions
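        # `index_select` requires non-negative indices, so shift the range
        # [-q_in, 0) into [0, size) by adding the size of the q-batch dim.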
q_idcs = (
torch.arange(-q_in, 0, device=samples.device) + samples.shape[q_dim]
)
return samples.index_select(q_dim, q_idcs)
else:
return samples[..., -q_in:, :]
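
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module: the block-Cholesky identity
# underlying low-rank updates like the one `sample_cached_cholesky` performs.
# Given a cached factor `L11` of the baseline covariance `K11`, the factor of
# the joint covariance [[K11, K12], [K21, K22]] can be completed without
# refactorizing `K11`:
#
#     L21 = K21 @ inv(L11).T
#     L22 = cholesky(K22 - L21 @ L21.T)
#
# A minimal torch-only demonstration (all names below are hypothetical and
# chosen for this sketch only):
#
#     import torch
#
#     n, q = 5, 2
#     A = torch.randn(n + q, n + q, dtype=torch.float64)
#     K = A @ A.T + 1e-6 * torch.eye(n + q, dtype=torch.float64)
#     K11, K21, K22 = K[:n, :n], K[n:, :n], K[n:, n:]
#
#     L11 = torch.linalg.cholesky(K11)  # the "cached" baseline factor
#     L21 = torch.linalg.solve_triangular(L11, K21.T, upper=False).T
#     L22 = torch.linalg.cholesky(K22 - L21 @ L21.T)
#
#     L = torch.linalg.cholesky(K)  # full factorization, for comparison
#     assert torch.allclose(L[n:, :n], L21)
#     assert torch.allclose(L[n:, n:], L22)
# ---------------------------------------------------------------------------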