in bayesmark/experiment_aggregate.py
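# Assumed imports and module setup for this excerpt. The snippet starts at
# main(), so these lines are reconstructed, hedged assumptions: json/logging
# are required by the body below, load_experiments and concat_experiments are
# defined earlier in this same file, and the bayesmark-internal import paths
# are best-effort guesses, not confirmed by the excerpt.
import json
import logging

import bayesmark.constants as cc
from bayesmark.cmd_parse import CmdArgs, agg_parser, parse_args, serializable_dict
from bayesmark.constants import EVAL_RESULTS, TIME_RESULTS
from bayesmark.serialize import XRSerializer
from bayesmark.signatures import analyze_signatures

logger = logging.getLogger(__name__)
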
def main():
    """See README for instructions on calling aggregate.
    """
    description = "Aggregate study results across functions and optimizers"
    args = parse_args(agg_parser(description))

    logger.setLevel(logging.INFO)  # Note this is the module-wide logger
    if args[CmdArgs.verbose]:
        logger.addHandler(logging.StreamHandler())
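    # Note (inferred from args[CmdArgs.verbose]; the flag is -v/--verbose in
    # the bayesmark CLI): without verbose, no stream handler is attached, so
    # the INFO messages below are only visible if the caller configures
    # logging itself.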
    # Get the list of experiment UUIDs and check that all three result types
    # (eval, time, suggest log) cover the same set of experiments
    uuid_list = XRSerializer.get_uuids(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL)
    uuid_list_ = XRSerializer.get_uuids(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.TIME)
    assert uuid_list == uuid_list_, "UUID list does not match between time and eval results"
    uuid_list_ = XRSerializer.get_uuids(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.SUGGEST_LOG)
    assert uuid_list == uuid_list_, "UUID list does not match between suggest log and eval results"
    # Lazily load all experiment data dumps via a generator, process each one, and concatenate
    data_G = load_experiments(uuid_list, args[CmdArgs.db_root], args[CmdArgs.db])
    all_perf, all_time, all_suggest, all_sigs = concat_experiments(data_G, ravel=args[CmdArgs.ravel])
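    # Hedged note on ravel (a reading of the -rv/--ravel CLI flag inferred
    # from its name, not confirmed by this excerpt): when set, batch
    # suggestions are raveled, i.e., treated as if they had been made serially.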
    # Check that the concatenated signatures are coherent
    sig_errs, signatures_median = analyze_signatures(all_sigs)
    logger.info("Signature errors:\n%s", sig_errs.to_string())
    print(json.dumps({"exp-agg sig errors": sig_errs.T.to_dict()}))
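    # (Inferred from usage here, not from the analyze_signatures docstring:
    # sig_errs is a tabular, pandas-style summary of per-test-case signature
    # disagreement, and signatures_median maps each test case to its median
    # signature, which is stored in the output metadata below.)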
    # Dump and save it all out
    logger.info("saving")
    meta = {"args": serializable_dict(args), "signature": signatures_median}
    XRSerializer.save_derived(all_perf, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=EVAL_RESULTS)
    XRSerializer.save_derived(all_time, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=TIME_RESULTS)
    for test_case, ds in all_suggest.items():
        XRSerializer.save_derived(ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=test_case)
    logger.info("done")