in parlai/scripts/train_model.py [0:0]
def setup_args(parser=None) -> ParlaiParser:
    """
    Build the ParlAI parser, adding command line args if necessary.

    :param ParlaiParser parser:
        Preexisting parser to append options to. Will be created if needed.

    :returns:
        the ParlaiParser with CLI options added.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Train a model')
    train = parser.add_argument_group('Training Loop Arguments')
    train.add_argument(
        '-et',
        '--evaltask',
        help='task to use for valid/test (defaults to the one used for training)',
    )
    train.add_argument(
        '--final-extra-opt',
        type=str,
        default='',
        help="A '.opt' file that is used for final eval. Useful for setting skip-generation to false. 'datatype' must be included as part of the opt.",
    )
    train.add_argument(
        '--eval-batchsize',
        type=int,
        hidden=True,
        help='Eval time batch size (defaults to same as -bs)',
    )
    train.add_argument(
        '--eval-dynamic-batching',  # FIXME: see https://github.com/facebookresearch/ParlAI/issues/3367
        default=None,
        type='nonestr',
        choices={None, 'off', 'full', 'batchsort'},
        help=(
            'Set dynamic batching at evaluation time. Set to off for '
            'train-only dynamic batching. Set to none (default) to use same '
            'setting as --dynamic-batching.'
        ),
    )
    train.add_argument(
        '--num-workers',
        default=0,
        type=int,
        help='Number of background workers (training only)',
    )
    train.add_argument('--display-examples', type='bool', default=False, hidden=True)
    train.add_argument('-eps', '--num-epochs', type=float, default=-1)
    train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
    train.add_argument(
        '-tstep',
        '--max-train-steps',
        '--max-lr-steps',
        type=int,
        default=-1,
        help='End training after n model updates',
    )
    train.add_argument('-ltim', '--log-every-n-secs', type=float, default=-1)
    train.add_argument(
        '-lstep',
        '--log-every-n-steps',
        type=int,
        default=50,
        help='Log every n training steps',
    )
    train.add_argument(
        '-vtim',
        '--validation-every-n-secs',
        type=float,
        default=-1,
        help='Validate every n seconds. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-vstep',
        '--validation-every-n-steps',
        type=int,
        default=-1,
        help='Validate every n training steps. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-stim',
        '--save-every-n-secs',
        type=float,
        default=-1,
        help='Saves the model to model_file.checkpoint after '
        'every n seconds (default -1, never).',
    )
    train.add_argument(
        '-sval',
        '--save-after-valid',
        type='bool',
        default=False,
        help='Saves the model to model_file.checkpoint after '
        'every validation (default %(default)s).',
    )
    train.add_argument(
        '-veps',
        '--validation-every-n-epochs',
        type=float,
        default=-1,
        help='Validate every n epochs. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-vme',
        '--validation-max-exs',
        type=int,
        default=-1,
        hidden=True,
        help='max examples to use during validation (default -1 uses all)',
    )
    train.add_argument(
        '--short-final-eval',
        default=False,
        hidden=True,
        type='bool',
        help='If true, obeys --validation-max-exs in the final '
        'validation and test evaluations.',
    )
    train.add_argument(
        '-vp',
        '--validation-patience',
        type=int,
        default=10,
        help=(
            'number of iterations of validation where result'
            ' does not improve before we stop training'
        ),
    )
    train.add_argument(
        '-vmt',
        '--validation-metric',
        default='accuracy',
        help='key into report table for selecting best validation',
    )
    train.add_argument(
        '-vmm',
        '--validation-metric-mode',
        type=str,
        choices=['max', 'min'],
        help='the direction in which to optimize the validation metric, i.e. maximize or minimize',
    )
    train.add_argument(
        '-vcut',
        '--validation-cutoff',
        type=float,
        default=1.0,
        hidden=True,
        help='value at which training will stop if exceeded by metric',
    )
    train.add_argument(
        '-lfc',
        '--load-from-checkpoint',
        type='bool',
        default=True,
        hidden=True,
        help='load model from checkpoint if available',
    )
    train.add_argument(
        '-vshare',
        '--validation-share-agent',
        # type='bool' added for consistency with the other boolean flags above;
        # without it, argparse stores the raw CLI string, so an explicit
        # `--validation-share-agent false` would be the truthy string 'false'.
        type='bool',
        default=False,
        hidden=True,
        help='use a shared copy of the agent for validation. '
        'this will eventually default to True, but '
        'currently defaults to False.',
    )
    train.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default,'
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu'
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    train.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='Report micro-averaged metrics instead of macro averaged metrics.',
        recommended=False,
    )
    # Logger/dictionary options are appended by their own helpers so every
    # script that trains a model exposes a consistent set of flags.
    TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
    WandbLogger.add_cmdline_args(parser, partial_opt=None)
    parser = setup_dict_args(parser)
    return parser