in botorch/optim/stopping.py [0:0]
def evaluate(self, fvals: Tensor) -> bool:
    r"""Evaluate the stopping criterion.

    Tracks a rolling window of the last `n_window + 1` function values and
    compares two overlapping exponentially-weighted moving averages; stops
    when the relative improvement drops below `rel_tol` for all elements,
    or when the iteration budget `maxiter` is exhausted.

    Args:
        fvals: tensor containing function values for the current iteration. If
            `fvals` contains more than one element, then the stopping criterion is
            evaluated element-wise and True is returned if the stopping criterion is
            true for all elements.

    TODO: add support for utilizing gradient information

    Returns:
        Stopping indicator (if True, stop the optimization).
    """
    self.iter += 1
    # Use >= (not ==) so the criterion keeps reporting True if evaluate()
    # is called again after the iteration budget has been exhausted.
    if self.iter >= self.maxiter:
        return True
    if self._prev_fvals is None:
        self._prev_fvals = fvals.unsqueeze(0)
    else:
        # Rolling buffer: keep only the last n_window entries plus the new one.
        self._prev_fvals = torch.cat(
            [self._prev_fvals[-self.n_window :], fvals.unsqueeze(0)]
        )
    # Warm-up: not enough history yet to form two overlapping windows.
    if self._prev_fvals.size(0) < self.n_window + 1:
        return False
    weights = self.weights
    # Match dtype/device of the incoming function values.
    weights = weights.to(fvals)
    if self._prev_fvals.ndim > 1:
        # Broadcast window weights across the trailing element dimension(s).
        weights = weights.unsqueeze(-1)
    # TODO: Update the exp moving average efficiently
    # Weighted moving averages over the previous and current windows.
    prev_ma = (self._prev_fvals[:-1] * weights).sum(dim=0)
    ma = (self._prev_fvals[1:] * weights).sum(dim=0)
    # TODO: Handle approx. zero losses (normalize by min/max loss range)
    rel_delta = (prev_ma - ma) / prev_ma.abs()
    if not self.minimize:
        # For maximization, an increase in the moving average is improvement.
        rel_delta = -rel_delta
    # Stop only when *all* elements have converged (largest change is small).
    if torch.max(rel_delta) < self.rel_tol:
        return True
    return False