in bayesmark/cmd_parse.py [0:0]
def launcher_parser(description):
    """Build the command-line parser for the experiment launcher.

    Parameters
    ----------
    description : str
        Description text displayed in the CLI help output.

    Returns
    -------
    parser : argparse.ArgumentParser
        Parser configured with all launcher options, inheriting the
        shared options from ``base_parser()``.
    """
    parser = argparse.ArgumentParser(description=description, parents=[base_parser()])

    # (cmd_arg, kwargs) pairs; registration order below fixes the help-text order.
    arg_specs = [
        (CmdArgs.uuid, dict(type=uuid, help="length 32 hex UUID for this experiment")),
        (CmdArgs.data_root, dict(type=filepath, help="root directory for all custom csv files")),
        (CmdArgs.db, dict(type=filename, help="database ID of this benchmark experiment")),
        (CmdArgs.optimizer, dict(type=joinable, nargs="+", help="optimizers to use")),
        (CmdArgs.data, dict(type=joinable, nargs="+", help="data sets to use")),
        (CmdArgs.classifier, dict(type=joinable, nargs="+", help="classifiers to use")),
        (CmdArgs.metric, dict(type=str, choices=METRICS, nargs="+", help="scoring metric to use")),
        # Iteration counts used in experiments
        (CmdArgs.n_calls, dict(default=100, type=positive_int, help="number of function evaluations")),
        (CmdArgs.n_suggest, dict(default=1, type=positive_int, help="number of suggestions to provide in parallel")),
        (CmdArgs.n_repeat, dict(default=20, type=positive_int, help="number of repetitions of each study")),
        (CmdArgs.timeout, dict(default=0, type=int, help="Timeout per experiment (0 = no timeout)")),
        # Arguments for creating dry run jobs file
        (
            CmdArgs.n_jobs,
            dict(
                type=int,
                default=0,
                help="number of jobs to put in the dry run file, the default 0 value disables dry run (real run)",
            ),
        ),
        # Default of current dir for the jobs file since that is generally the default for everything.
        (CmdArgs.jobs_file, dict(type=filepath, default="./jobs.txt", help="a jobs file with all commands to be run")),
    ]
    for cmd_arg, kwargs in arg_specs:
        add_argument(parser, cmd_arg, **kwargs)
    return parser