in nevergrad/optimization/optimizerlib.py [0:0]
def _internal_tell_candidate(self, candidate: p.Parameter, loss: tp.FloatLoss) -> None:
    """Record an evaluated candidate; once a full generation of ``llambda``
    children has been collected, recompute the distribution center from the
    ``mu`` best candidates and update the scale ``sigma``.
    """
    if self.population_size_adaptation:
        self.popsize.add_value(loss)
    self.children.append(candidate)
    if len(self.children) < self.popsize.llambda:
        # Generation not complete yet: nothing more to do.
        return
    # Select the mu best children (lowest loss) as the new parents.
    self.children.sort(key=base._loss)
    self.parents = self.children[: self.popsize.mu]
    self.children = []
    mu = self.popsize.mu
    # New center: mean of the parents' standardized data.
    self.current_center = (
        sum(  # type: ignore
            parent.get_standardized_data(reference=self.parametrization) for parent in self.parents
        )
        / mu
    )
    if self.population_size_adaptation:
        if self.popsize.llambda < self.min_coef_parallel_context * self.dimension:
            # Population size not large enough for the EMNA variance estimate:
            # fall back to the geometric mean of the parents' mutation sigmas.
            log_sigmas = np.log([parent._meta["sigma"] for parent in self.parents])
            self.sigma = np.exp(
                np.sum(log_sigmas, axis=0 if self.isotropic else None) / mu  # type: ignore
            )
        else:
            squared_devs = [
                (
                    self.parents[i].get_standardized_data(reference=self.parametrization)
                    - self.current_center
                )
                ** 2
                for i in range(mu)
            ]
            denom = mu * (self.dimension if self.isotropic else 1)
            self.sigma = np.sqrt(np.sum(squared_devs) / denom)
    else:
        # EMNA update: empirical spread of the parents around the new center.
        squared_devs = [
            (
                self.parents[i].get_standardized_data(reference=self.parametrization)
                - self.current_center
            )
            ** 2
            for i in range(mu)
        ]
        denom = mu * (self.dimension if self.isotropic else 1)
        self.sigma = np.sqrt(
            np.sum(squared_devs, axis=0 if self.isotropic else None) / denom  # type: ignore
        )
    if self.num_workers / self.dimension > 32:  # faster decrease of sigma if large parallel context
        shrink = max(1, (np.log(self.popsize.llambda) / 2) ** (1 / self.dimension))
        self.sigma /= shrink