# run_branin_benchmarks() — from benchmarks/run_ebo_benchmarks.py

def run_branin_benchmarks(rep):
    """Run one replication of EBO on the 100-D augmented Branin problem.

    Sets up the benchmark via ``benchmark_minimize_callable``, runs the EBO
    optimizer for 50 trials, and serializes the resulting experiment to disk.

    Args:
        rep: Replication index; forwarded to the benchmark harness and used
            in the output filename.

    Side effects:
        Writes ``results/branin_100_ebo_rep_{rep}.json``.
    """
    experiment, f = benchmark_minimize_callable(
        problem=branin_100,
        num_trials=50,
        method_name='ebo',
        replication_index=rep,
    )

    # Search box: first 50 dims in [-5, 10], last 50 dims in [0, 15]
    # (row 0 = lower bounds, row 1 = upper bounds).
    options = {
        'x_range': np.vstack((
            np.hstack((-5 * np.ones(50), np.zeros(50))),
            np.hstack((10 * np.ones(50), 15 * np.ones(50))),
        )),
        'dx': 100,
        'max_value': -0.397887,  # Let it cheat and know the true max value
        'T': 50,
        'gp_sigma': 1e-7,
    }
    options.update(core_options)

    def f_max(x):
        # EBO maximizes, but the benchmark callable is a minimization
        # objective, so negate it.
        return -f(x)

    e = ebo(f_max, options)
    try:
        e.run()
    except ValueError:
        pass  # EBO can ask for more than T function evaluations

    with open(f'results/branin_100_ebo_rep_{rep}.json', 'w') as fout:
        json.dump(object_to_json(experiment), fout)