# example_opt_root/pysot_optimizer.py
import warnings
from copy import copy
import numpy as np
from poap.strategy import EvalRecord
from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.optimization_problems import OptimizationProblem
from pySOT.strategy import SRBFStrategy
from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant
from bayesmark.abstract_optimizer import AbstractOptimizer
from bayesmark.experiment import experiment_main
from bayesmark.space import JointSpace


class PySOTOptimizer(AbstractOptimizer):
primary_import = "pysot"

    def __init__(self, api_config):
"""Build wrapper class to use an optimizer in benchmark.
Parameters
----------
api_config : dict-like of dict-like
Configuration of the optimization variables. See API description.
"""
AbstractOptimizer.__init__(self, api_config)
self.space_x = JointSpace(api_config)
self.bounds = self.space_x.get_bounds()
self.create_opt_prob() # Sets up the optimization problem (needs self.bounds)
self.max_evals = np.iinfo(np.int32).max # NOTE: Largest possible int
self.batch_size = None
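        # Bookkeeping: history holds unwarped suggestions still awaiting an
        # observation; proposals holds the matching pending pySOT proposals.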
self.history = []
self.proposals = []

    def create_opt_prob(self):
"""Create an optimization problem object."""
opt = OptimizationProblem()
opt.lb = self.bounds[:, 0] # In warped space
opt.ub = self.bounds[:, 1] # In warped space
opt.dim = len(self.bounds)
opt.cont_var = np.arange(len(self.bounds))
opt.int_var = []
assert len(opt.cont_var) + len(opt.int_var) == opt.dim
opt.objfun = None
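        # objfun is deliberately left as None: pySOT never calls the objective
        # directly in this wrapper; values come back through observe() instead.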
self.opt = opt

    def start(self, max_evals):
"""Starts a new pySOT run."""
self.history = []
self.proposals = []
# Symmetric Latin hypercube design
des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)
# Warped RBF interpolant
rbf = RBFInterpolant(
dim=self.opt.dim,
lb=self.opt.lb,
ub=self.opt.ub,
kernel=CubicKernel(),
tail=LinearTail(self.opt.dim),
eta=1e-4,
)
# Optimization strategy
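        # NOTE: In asynchronous mode the strategy does not wait for a full
        # batch of results before proposing new points.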
self.strategy = SRBFStrategy(
max_evals=self.max_evals,
opt_prob=self.opt,
exp_design=slhd,
surrogate=rbf,
asynchronous=True,
batch_size=1,
use_restarts=True,
)

    def suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.

        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output

        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """
if self.batch_size is None: # First call to suggest
self.batch_size = n_suggestions
self.start(self.max_evals)

            # Set the failure tolerance as if we were running in batch mode
d, p = float(self.opt.dim), float(n_suggestions)
self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))
# Now we can make suggestions
x_w = []
self.proposals = []
for _ in range(n_suggestions):
proposal = self.strategy.propose_action()
record = EvalRecord(proposal.args, status="pending")
proposal.record = record
proposal.accept() # This triggers all the callbacks
# It is possible that pySOT proposes a previously evaluated point
# when all variables are integers, so we just abort in this case
# since we have likely converged anyway. See PySOT issue #30.
x = list(proposal.record.params) # From tuple to list
x_unwarped, = self.space_x.unwarp(x)
if x_unwarped in self.history:
warnings.warn("pySOT proposed the same point twice")
self.start(self.max_evals)
return self.suggest(n_suggestions=n_suggestions)
# NOTE: Append unwarped to avoid rounding issues
self.history.append(copy(x_unwarped))
self.proposals.append(proposal)
x_w.append(copy(x_unwarped))
return x_w

    def _observe(self, x, y):
        """Record a single (suggestion, objective value) pair with pySOT."""
# Find the matching proposal and execute its callbacks
idx = [x == xx for xx in self.history]
i = np.argwhere(idx)[0].item() # Pick the first index if there are ties
proposal = self.proposals[i]
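        # Completing the EvalRecord fires pySOT's callbacks so the strategy
        # can incorporate this observation (e.g., update the surrogate).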
proposal.record.complete(y)
self.proposals.pop(i)
self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
assert len(X) == len(y)
for x_, y_ in zip(X, y):
            # Just ignore any inf observations we got; unclear if this is the right thing to do
if np.isfinite(y_):
self._observe(x_, y_)
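

# Hedged usage sketch, not part of the benchmark API: `_example_manual_loop`
# is a hypothetical helper that only illustrates how suggest()/observe() pair
# up when this wrapper is driven by hand instead of through experiment_main.
# The api_config layout follows the bayesmark API description noted in
# __init__; the parameter names and toy objective are made up for this sketch.
def _example_manual_loop(n_iters=5):
    api_config = {
        "x": {"type": "real", "space": "linear", "range": (-5.0, 5.0)},
        "y": {"type": "real", "space": "linear", "range": (-5.0, 5.0)},
    }
    opt = PySOTOptimizer(api_config)
    for _ in range(n_iters):
        guesses = opt.suggest(n_suggestions=2)
        # Toy objective: squared distance from the origin (smaller is better)
        losses = [g["x"] ** 2 + g["y"] ** 2 for g in guesses]
        opt.observe(guesses, losses)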
if __name__ == "__main__":
experiment_main(PySOTOptimizer)