in nevergrad/optimization/base.py
def _update_archive_and_bests(self, candidate: p.Parameter, loss: tp.FloatLoss) -> None:
    x = candidate.get_standardized_data(reference=self.parametrization)
    if not isinstance(
        loss, (Real, float)
    ):  # using "float" along with "Real" because mypy does not understand "Real" for now (Issue #3186)
        raise TypeError(
            f'"tell" method only supports float values but the passed loss was: {loss} (type: {type(loss)}).'
        )
    if np.isnan(loss) or loss == np.inf:
        self._warn(f"Updating fitness with {loss} value", errors.BadLossWarning)
    mvalue: tp.Optional[utils.MultiValue] = None
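    # the archive is keyed by the candidate's standardized data computed above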
    if x not in self.archive:
        self.archive[x] = utils.MultiValue(candidate, loss, reference=self.parametrization)
    else:
        mvalue = self.archive[x]
        mvalue.add_evaluation(loss)
        # both parameters should be non-None
        if mvalue.parameter.loss > candidate.loss:  # type: ignore
            mvalue.parameter = candidate  # keep best candidate
    # update current best records
    # this may have to be improved if we want to keep more kinds of best losses
    for name in self.current_bests:
        if mvalue is self.current_bests[name]:  # reboot
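            # the default argument n=name binds the current metric name instead of
            # closing over the loop variable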
            best = min(self.archive.values(), key=lambda mv, n=name: mv.get_estimation(n))  # type: ignore
            # rebuild: the best point may have changed, and the stored value did not track the update anyway
            self.current_bests[name] = best
        else:
            if self.archive[x].get_estimation(name) <= self.current_bests[name].get_estimation(name):
                self.current_bests[name] = self.archive[x]
            # deactivated checks
            # if not (np.isnan(loss) or loss == np.inf):
            #     if not self.current_bests[name].x in self.archive:
            #         bval = self.current_bests[name].get_estimation(name)
            #         avals = (min(v.get_estimation(name) for v in self.archive.values()),
            #                  max(v.get_estimation(name) for v in self.archive.values()))
            #         raise RuntimeError(f"Best value should exist in the archive at num_tell={self.num_tell})\n"
            #                            f"Best value is {bval} and archive is within range {avals} for {name}")
    if self.pruning is not None:
        self.archive = self.pruning(self.archive)
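
For context, this method is not called directly by users: the optimizer's public tell method delegates to it once a candidate has been evaluated. Below is a minimal sketch of the surrounding ask/tell loop, assuming a standard nevergrad installation; the choice of OnePlusOne and the quadratic loss are illustrative only.

import numpy as np
import nevergrad as ng

# any registered optimizer works here; OnePlusOne is only an example
optimizer = ng.optimizers.OnePlusOne(parametrization=2, budget=20)

for _ in range(optimizer.budget):
    candidate = optimizer.ask()
    # the loss must be a plain float, otherwise the isinstance check above raises TypeError
    loss = float(np.sum((candidate.value - 0.5) ** 2))
    # tell() records the evaluation and ends up in _update_archive_and_bests
    optimizer.tell(candidate, loss)

recommendation = optimizer.provide_recommendation()
print(recommendation.value)

Each tell call updates the archive (repeated evaluations of the same point are merged into a single MultiValue entry) and refreshes current_bests, after which the optional pruning step may shrink the archive.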