in bayesmark/cmd_parse.py [0:0]
def experiment_parser(description):
    """Build the command-line argument parser for a single benchmark experiment.

    Parameters
    ----------
    description : str
        Human-readable program description shown in ``--help`` output.

    Returns
    -------
    parser : argparse.ArgumentParser
        Parser with all experiment arguments registered, inheriting the
        shared options from ``base_parser()``.
    """
    parser = argparse.ArgumentParser(description=description, parents=[base_parser()])

    # (CmdArgs key, keyword arguments forwarded to add_argument), in registration order.
    arg_specs = [
        (CmdArgs.uuid, dict(type=uuid, required=True, help="length 32 hex UUID for this experiment")),
        # NOTE: this could be made simpler by using '.' as the dataroot default,
        # even when no custom data is used.
        (CmdArgs.data_root, dict(type=filepath, help="root directory for all custom csv files")),
        (CmdArgs.db, dict(type=filename, required=True, help="database ID of this benchmark experiment")),
        (CmdArgs.optimizer, dict(required=True, type=joinable, help="optimizer to use")),
        (CmdArgs.data, dict(required=True, type=joinable, help="data set to use")),
        (CmdArgs.classifier, dict(required=True, type=joinable, help="classifier to use")),
        (CmdArgs.metric, dict(required=True, type=str, choices=METRICS, help="scoring metric to use")),
        (CmdArgs.n_calls, dict(default=100, type=positive_int, help="number of function evaluations")),
        (CmdArgs.n_suggest, dict(default=1, type=positive_int, help="number of suggestions to provide in parallel")),
    ]
    for arg_key, kwargs in arg_specs:
        add_argument(parser, arg_key, **kwargs)
    return parser