# parse_args
# Extracted from archived/gluoncv_yolo_neo/train_yolo.py


def parse_args():
    """Parse command-line options for YOLO training.

    Defines the full training CLI: base network, input shape, dataset,
    optimizer schedule (lr / warmup / decay), checkpointing intervals,
    and augmentation switches (mixup, label smoothing, random shape).

    Returns:
        argparse.Namespace: the parsed arguments (defaults applied when a
        flag is not given on the command line).
    """
    parser = argparse.ArgumentParser(description="Train YOLO networks with random input shape.")
    parser.add_argument(
        "--network",
        type=str,
        default="mobilenet1.0",
        help="Base network name which serves as feature extraction base.",
    )
    parser.add_argument(
        "--data-shape",
        type=int,
        default=416,
        help="Input data shape for evaluation, use 320, 416, 608... "
        + "Training is with random shapes from (320 to 608).",
    )
    parser.add_argument("--batch-size", type=int, default=64, help="Training mini-batch size")
    parser.add_argument(
        "--dataset", type=str, default="voc", help="Training dataset. Now support voc."
    )
    parser.add_argument(
        "--num-workers",
        "-j",
        dest="num_workers",
        type=int,
        default=4,
        # Fixed help-text typo: "if you CPU" -> "if your CPU".
        help="Number of data workers, you can use larger "
        "number to accelerate data loading, if your CPU and GPUs are powerful.",
    )
    parser.add_argument(
        "--gpus", type=str, default="0", help="Training with GPUs, you can specify 1,3 for example."
    )
    # NOTE(review): --epochs and --num-epochs are duplicates with independent
    # dests (args.epochs / args.num_epochs). Both are kept for backward
    # compatibility, but callers should converge on one; setting only one of
    # them on the command line leaves the other at its default of 200.
    parser.add_argument("--epochs", type=int, default=200, help="Training epochs.")
    parser.add_argument("--num-epochs", type=int, default=200, help="Training epochs.")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="Resume from previously saved parameters if not None. "
        "For example, you can resume from ./yolo3_xxx_0123.params",
    )
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=0,
        help="Starting epoch for resuming, default is 0 for new training."
        "You can specify it to 100 for example to start from 100 epoch.",
    )
    parser.add_argument("--lr", type=float, default=0.001, help="Learning rate, default is 0.001")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="step",
        help="learning rate scheduler mode. options are step, poly and cosine.",
    )
    parser.add_argument(
        "--lr-decay", type=float, default=0.1, help="decay rate of learning rate. default is 0.1."
    )
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable.",
    )
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="160,180",
        # Comma-separated epoch list kept as a raw string; parsed downstream.
        help="epochs at which learning rate decays. default is 160,180.",
    )
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=0.0,
        help="starting warmup learning rate. default is 0.0.",
    )
    parser.add_argument("--warmup-epochs", type=int, default=0, help="number of warmup epochs.")
    parser.add_argument("--momentum", type=float, default=0.9, help="SGD momentum, default is 0.9")
    parser.add_argument("--wd", type=float, default=0.0005, help="Weight decay, default is 5e-4")
    parser.add_argument(
        "--log-interval", type=int, default=100, help="Logging mini-batch interval. Default is 100."
    )
    parser.add_argument("--save-prefix", type=str, default="", help="Saving parameter prefix")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=10,
        help="Saving parameters epoch interval, best model will always be saved.",
    )
    parser.add_argument(
        "--val-interval",
        type=int,
        default=1,
        help="Epoch interval for validation, increase the number will reduce the "
        "training time if validation is slow.",
    )
    parser.add_argument("--seed", type=int, default=233, help="Random seed to be fixed.")
    parser.add_argument(
        "--num-samples",
        type=int,
        default=-1,
        help="Training images. Use -1 to automatically get the number.",
    )
    parser.add_argument("--syncbn", action="store_true", help="Use synchronize BN across devices.")
    parser.add_argument(
        "--no-random-shape",
        action="store_true",
        help="Use fixed size(data-shape) throughout the training, which will be faster "
        "and require less memory. However, final model will be slightly worse.",
    )
    parser.add_argument(
        "--no-wd",
        action="store_true",
        help="whether to remove weight decay on bias, and beta/gamma for batchnorm layers.",
    )
    parser.add_argument("--mixup", action="store_true", help="whether to enable mixup.")
    parser.add_argument(
        "--no-mixup-epochs",
        type=int,
        default=20,
        help="Disable mixup training if enabled in the last N epochs.",
    )
    parser.add_argument("--label-smooth", action="store_true", help="Use label smoothing.")
    parser.add_argument(
        "--amp", action="store_true", help="Use MXNet AMP for mixed precision training."
    )
    parser.add_argument(
        "--horovod",
        action="store_true",
        help="Use MXNet Horovod for distributed training. Must be run with OpenMPI. "
        "--gpus is ignored when using --horovod.",
    )

    # Parses sys.argv[1:]; unknown flags cause argparse to exit with an error.
    args = parser.parse_args()
    return args