# benchmarks/compile_benchmark_results.py
# Assumed imports: json and numpy are required below; object_from_json and
# object_to_json are Ax's JSON (de)serialization helpers. The benchmark helpers
# (merge_benchmark_results, add_exp, aggregate_problem_results) and the problem
# definitions (branin_100, gramacy_100) are assumed to be defined elsewhere in this repo.
import json

import numpy as np
from ax.storage.json_store.decoder import object_from_json
from ax.storage.json_store.encoder import object_to_json


def compile_branin_gramacy_100():
    all_results = {}
    for rep in range(50):
        # Merge in the ALEBO / REMBO / HeSBO / Sobol results for this repetition
        with open(f'results/branin_gramacy_100_alebo_rembo_hesbo_sobol_rep_{rep}.json', 'r') as fin:
            res_i = object_from_json(json.load(fin))
        all_results = merge_benchmark_results(all_results, res_i)

        # Add the other baselines, which were run separately on Branin, D=100
        for method_name in ['addgpucb', 'cmaes', 'ebo', 'smac', 'turbo']:
            with open(f'results/branin_100_{method_name}_rep_{rep}.json', 'r') as fin:
                exp_i = object_from_json(json.load(fin))
            all_results = add_exp(
                res=all_results,
                exp=exp_i,
                problem_name='Branin, D=100',
                method_name=method_name,
            )

    # Aggregate the per-repetition runs for each problem
    res = {
        p.name: aggregate_problem_results(runs=all_results[p.name], problem=p)
        for p in [branin_100, gramacy_100]
    }

    # Add in RRembo results: take the running best objective value across iterations
    for proj in ['standard', 'reverse']:
        method = f'rrembos_{proj}_kPsi'
        with open(f'results/branin_100_{method}.json', 'r') as fin:
            A = json.load(fin)
        res['Branin, D=100'].objective_at_true_best[method] = np.minimum.accumulate(np.array(A), axis=1)

    # Save the aggregated results
    with open('results/branin_gramacy_100_aggregated_results.json', 'w') as fout:
        json.dump(object_to_json(res), fout)
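

# A minimal usage sketch (an assumption, not part of the original excerpt): this
# compilation step would be run standalone after the per-replication benchmark jobs
# have written their JSON files into results/. The original file may use a
# different entry point.
if __name__ == '__main__':
    compile_branin_gramacy_100()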