in pytorch_translate/hybrid_transformer_rnn.py [0:0]
from pytorch_translate import vocab_reduction  # module-level import used below


def add_args(parser):
    """Add model-specific arguments to the parser."""
    parser.add_argument(
        "--dropout", type=float, metavar="D", help="dropout probability"
    )
    parser.add_argument(
        "--attention-dropout",
        type=float,
        metavar="D",
        help="dropout probability for attention weights",
    )
    parser.add_argument(
        "--relu-dropout",
        type=float,
        metavar="D",
        help="dropout probability after ReLU in FFN",
    )
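    # Transformer encoder args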
    parser.add_argument(
        "--encoder-pretrained-embed",
        type=str,
        metavar="STR",
        help="path to pre-trained encoder embedding",
    )
    parser.add_argument(
        "--encoder-embed-dim",
        type=int,
        metavar="N",
        help="encoder embedding dimension",
    )
    parser.add_argument(
        "--encoder-ffn-embed-dim",
        type=int,
        metavar="N",
        help="encoder embedding dimension for FFN",
    )
    parser.add_argument(
        "--encoder-freeze-embed",
        default=False,
        action="store_true",
        help=(
            "whether to freeze the encoder embedding or allow it to be "
            "updated during training"
        ),
    )
    parser.add_argument(
        "--encoder-layers", type=int, metavar="N", help="num encoder layers"
    )
    parser.add_argument(
        "--encoder-attention-heads",
        type=int,
        metavar="N",
        help="num encoder attention heads",
    )
    parser.add_argument(
        "--encoder-normalize-before",
        default=False,
        action="store_true",
        help="apply layernorm before each encoder block",
    )
    parser.add_argument(
        "--encoder-learned-pos",
        default=False,
        action="store_true",
        help="use learned positional embeddings in the encoder",
    )
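    # Hybrid RNN (LSTM) decoder args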
    parser.add_argument(
        "--decoder-pretrained-embed",
        type=str,
        metavar="STR",
        help="path to pre-trained decoder embedding",
    )
    parser.add_argument(
        "--decoder-embed-dim",
        type=int,
        metavar="N",
        help="decoder embedding dimension",
    )
    parser.add_argument(
        "--decoder-freeze-embed",
        default=False,
        action="store_true",
        help=(
            "whether to freeze the decoder embedding or allow it to be "
            "updated during training"
        ),
    )
    parser.add_argument(
        "--decoder-layers", type=int, metavar="N", help="num decoder layers"
    )
    parser.add_argument(
        "--decoder-attention-heads",
        type=int,
        metavar="N",
        help="num decoder attention heads",
    )
    parser.add_argument(
        "--decoder-reduced-attention-dim",
        type=int,
        default=None,
        metavar="N",
        help="if specified, computes attention with this dimensionality "
        "(instead of using encoder output dims)",
    )
    parser.add_argument(
        "--decoder-lstm-units",
        type=int,
        metavar="N",
        help="num LSTM units for each decoder layer",
    )
    parser.add_argument(
        "--decoder-out-embed-dim",
        default=None,
        type=int,
        metavar="N",
        help="decoder output embedding dimension",
    )
    # Args for vocab reduction
    vocab_reduction.add_args(parser)
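

# Usage sketch (assumption: calling add_args directly on a plain argparse
# parser for illustration; in pytorch_translate/fairseq the framework builds
# the parser and invokes add_args for the selected model architecture):
#
#     import argparse
#
#     parser = argparse.ArgumentParser()
#     add_args(parser)
#     args = parser.parse_args(
#         ["--encoder-embed-dim", "256", "--decoder-lstm-units", "512"]
#     )
#     assert args.encoder_embed_dim == 256
#     assert args.decoder_lstm_units == 512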