in bayesmark/builtin_opt/pysot_optimizer.py [0:0]
def suggest(self, n_suggestions=1):
    """Get a suggestion from the optimizer.

    Parameters
    ----------
    n_suggestions : int
        Desired number of parallel suggestions in the output

    Returns
    -------
    next_guess : list of dict
        List of `n_suggestions` suggestions to evaluate the objective
        function. Each suggestion is a dictionary where each key
        corresponds to a parameter being optimized.
    """
    # Lazily kick off the underlying pySOT run on the very first call,
    # once the actual batch size is known.
    if self.batch_size is None:
        self.batch_size = n_suggestions
        self.start(self.max_evals)
        # Configure the failure tolerance as though we were running a
        # synchronous batch of size n_suggestions.
        dim, batch = float(self.opt.dim), float(n_suggestions)
        tol = max(np.ceil(dim / batch), np.ceil(4 / batch))
        self.strategy.failtol = batch * int(tol)

    # Collect one proposal per requested suggestion.
    suggestions = []
    self.proposals = []
    for _ in range(n_suggestions):
        prop = self.strategy.propose_action()
        prop.record = EvalRecord(prop.args, status="pending")
        prop.accept()  # Fires all the pySOT callbacks attached to the proposal

        guess, = self.space_x.unwarp(list(prop.record.params))
        # pySOT can re-propose an already evaluated point when every
        # variable is an integer; in that case restart the run and try
        # again, since we have likely converged anyway. See PySOT issue #30.
        if guess in self.history:
            warnings.warn("pySOT proposed the same point twice")
            self.start(self.max_evals)
            return self.suggest(n_suggestions=n_suggestions)

        # Record the unwarped point to sidestep warp/unwarp rounding issues
        self.history.append(copy(guess))
        self.proposals.append(prop)
        suggestions.append(copy(guess))
    return suggestions