# prepare_parser()
# Source: BigGAN_PyTorch/utils.py


def prepare_parser():
    """Build the ArgumentParser shared by all training/eval scripts.

    Returns:
        ArgumentParser: parser exposing dataset/dataloader, augmentation,
        IC-GAN, model, optimizer, batching/precision, bookkeeping, EMA,
        numerical-precision, regularization, run-setup, testing, resume and
        logging options. Callers typically do ``vars(parser.parse_args())``
        to obtain the config dict consumed by the rest of the codebase.
    """
    usage = "Parser for all scripts."
    parser = ArgumentParser(description=usage)

    parser.add_argument(
        "--json_config",
        type=str,
        default="",
        help="Json config from where to load the configuration parameters.",
    )

    ### Dataset/Dataloader stuff ###
    parser.add_argument(
        "--resolution",
        type=int,
        default=64,
        help="Resolution to train with " "(default: %(default)s)",
    )
    parser.add_argument(
        "--augment",
        action="store_true",
        default=False,
        help="Augment with random crops and flips (default: %(default)s)",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=8,
        help="Number of dataloader workers; consider using less for HDF5 "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--no_pin_memory",
        action="store_false",
        dest="pin_memory",
        default=True,
        help="Pin data into memory through dataloader? (default: %(default)s)",
    )
    parser.add_argument(
        "--shuffle",
        action="store_true",
        default=False,
        help="Shuffle the data (strongly recommended)? (default: %(default)s)",
    )
    parser.add_argument(
        "--load_in_mem",
        action="store_true",
        default=False,
        help="Load all data into memory? (default: %(default)s)",
    )
    parser.add_argument(
        "--use_multiepoch_sampler",
        action="store_true",
        default=False,
        help="Use the multi-epoch sampler for dataloader? (default: %(default)s)",
    )
    parser.add_argument(
        "--use_checkpointable_sampler",
        action="store_true",
        default=False,
        help="Use the checkpointable sampler for dataloader? (default: %(default)s)",
    )
    parser.add_argument(
        "--use_balanced_sampler",
        action="store_true",
        default=False,
        help="Use the class balanced sampler for dataloader? (default: %(default)s)",
    )
    # FIX: was type=int, which made argparse reject any fractional value
    # (int("0.5") raises) — a relaxation temperature must accept floats.
    # Integer CLI inputs still parse, so this is backward-compatible.
    parser.add_argument(
        "--longtail_temperature",
        type=float,
        default=1.0,
        help="Temperature to relax longtail_distribution (default: %(default)s)",
    )

    parser.add_argument(
        "--longtail",
        action="store_true",
        default=False,
        help="Use long-tail version of the dataset",
    )
    parser.add_argument(
        "--longtail_gen",
        action="store_true",
        default=False,
        help="Use long-tail version of class conditioning sampling for generator.",
    )
    parser.add_argument(
        "--custom_distrib_gen",
        action="store_true",
        default=False,
        help="Use custom distribution for sampling class conditionings in generator.",
    )

    ### Data augmentation ###
    parser.add_argument(
        "--DiffAugment", type=str, default="", help="DiffAugment policy"
    )
    parser.add_argument(
        "--DA",
        action="store_true",
        default=False,
        help="Diff Augment for GANs (default: %(default)s)",
    )
    parser.add_argument(
        "--hflips",
        action="store_true",
        default=False,
        help="Use horizontal flips in data augmentation. " "(default: %(default)s)",
    )

    ### IC-GAN arguments ###
    parser.add_argument(
        "--instance_cond",
        action="store_true",
        default=False,
        help="Use instance features as conditioning",
    )
    parser.add_argument(
        "--feature_augmentation",
        action="store_true",
        default=False,
        help="use hflips in instance conditionings (default: %(default)s)",
    )
    parser.add_argument(
        "--which_knn_balance",
        type=str,
        default="instance_balance",
        choices=["instance_balance", "nnclass_balance"],
        help="Class balancing either done at the instance level or at the class level.",
    )
    parser.add_argument(
        "--G_shared_feat",
        action="store_true",
        default=False,
        help="Use fully connected layer for conditioning instance features in G? (default: %(default)s)",
    )
    parser.add_argument(
        "--shared_dim_feat",
        type=int,
        default=2048,
        help="G's fully connected layer output dimensionality for instance features "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--k_nn",
        type=int,
        default=50,
        help="Number of neighbors for each instance " "(default: %(default)s)",
    )
    parser.add_argument(
        "--feature_extractor",
        type=str,
        default="classification",
        choices=["classification", "selfsupervised"],
        help="Choice of feature extractor",
    )
    parser.add_argument(
        "--backbone_feature_extractor",
        type=str,
        default="resnet50",
        choices=["resnet50"],
        help="Choice of feature extractor backbone",
    )

    parser.add_argument(
        "--eval_instance_set",
        type=str,
        default="train",
        help="(Eval) Dataset split from which to draw conditioning instances (default: %(default)s)",
    )
    parser.add_argument(
        "--kmeans_subsampled",
        type=int,
        default=-1,
        help="Number of kmeans centers if using subsampled training instances (default: %(default)s)",
    )
    parser.add_argument(
        "--n_subsampled_data",
        type=float,
        default=-1,
        help="Percent of instances used at test time",
    )

    ### COCO_Stuff evaluation ###
    parser.add_argument(
        "--filter_hd",
        type=int,
        default=-1,
        help="Hamming distance to filter val test in COCO_Stuff (by default no filtering) (default: %(default)s)",
    )

    ### Model stuff ###
    parser.add_argument(
        "--model",
        type=str,
        default="BigGAN",
        help="Name of the model module (default: %(default)s)",
    )
    parser.add_argument(
        "--G_param",
        type=str,
        default="SN",
        help="Parameterization style to use for G, spectral norm (SN) or SVD (SVD)"
        " or None (default: %(default)s)",
    )
    parser.add_argument(
        "--D_param",
        type=str,
        default="SN",
        help="Parameterization style to use for D, spectral norm (SN) or SVD (SVD)"
        " or None (default: %(default)s)",
    )
    parser.add_argument(
        "--G_ch",
        type=int,
        default=64,
        help="Channel multiplier for G (default: %(default)s)",
    )
    parser.add_argument(
        "--D_ch",
        type=int,
        default=64,
        help="Channel multiplier for D (default: %(default)s)",
    )
    parser.add_argument(
        "--G_depth",
        type=int,
        default=1,
        help="Number of resblocks per stage in G? (default: %(default)s)",
    )
    parser.add_argument(
        "--D_depth",
        type=int,
        default=1,
        help="Number of resblocks per stage in D? (default: %(default)s)",
    )
    parser.add_argument(
        "--D_thin",
        action="store_false",
        dest="D_wide",
        default=True,
        help="Use the SN-GAN channel pattern for D? (default: %(default)s)",
    )
    # NOTE(review): action="store_true" with default=True means this flag is a
    # no-op and cannot be disabled from the CLI; kept as-is for backward
    # compatibility with existing configs/scripts (disable via --json_config).
    parser.add_argument(
        "--G_shared",
        action="store_true",
        default=True,
        help="Use shared embeddings in G? (default: %(default)s)",
    )
    parser.add_argument(
        "--shared_dim",
        type=int,
        default=0,
        help="G's shared embedding dimensionality; if 0, will be equal to dim_z. "
        "(default: %(default)s)",
    )
    # FIX: help strings below had a dangling ')' and were missing the
    # "(default: " prefix, producing malformed --help output.
    parser.add_argument(
        "--dim_z", type=int, default=120, help="Noise dimensionality (default: %(default)s)"
    )
    parser.add_argument(
        "--z_var", type=float, default=1.0, help="Noise variance (default: %(default)s)"
    )
    parser.add_argument(
        "--hier",
        action="store_true",
        default=False,
        help="Use hierarchical z in G? (default: %(default)s)",
    )
    parser.add_argument(
        "--syncbn",
        action="store_true",
        default=False,
        help="Sync batch norm? (default: %(default)s)",
    )
    parser.add_argument(
        "--cross_replica",
        action="store_true",
        default=False,
        help="Cross_replica batchnorm in G? (default: %(default)s)",
    )
    parser.add_argument(
        "--mybn",
        action="store_true",
        default=False,
        help="Use my batchnorm (which supports standing stats?) (default: %(default)s)",
    )
    parser.add_argument(
        "--G_nl",
        type=str,
        default="relu",
        help="Activation function for G (default: %(default)s)",
    )
    parser.add_argument(
        "--D_nl",
        type=str,
        default="relu",
        help="Activation function for D (default: %(default)s)",
    )
    parser.add_argument(
        "--G_attn",
        type=str,
        default="64",
        help="What resolutions to use attention on for G (underscore separated) "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--D_attn",
        type=str,
        default="64",
        help="What resolutions to use attention on for D (underscore separated) "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--norm_style",
        type=str,
        default="bn",
        help="Normalizer style for G, one of bn [batchnorm], in [instancenorm], "
        "ln [layernorm], gn [groupnorm] (default: %(default)s)",
    )

    ### Model init stuff ###
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="Random seed to use; affects both initialization and "
        " dataloading. (default: %(default)s)",
    )
    parser.add_argument(
        "--G_init",
        type=str,
        default="ortho",
        help="Init style to use for G (default: %(default)s)",
    )
    parser.add_argument(
        "--D_init",
        type=str,
        default="ortho",
        help="Init style to use for D (default: %(default)s)",
    )
    parser.add_argument(
        "--skip_init",
        action="store_true",
        default=False,
        help="Skip initialization, ideal for testing when ortho init was used "
        "(default: %(default)s)",
    )

    ### Optimizer stuff ###
    parser.add_argument(
        "--G_lr",
        type=float,
        default=5e-5,
        help="Learning rate to use for Generator (default: %(default)s)",
    )
    parser.add_argument(
        "--D_lr",
        type=float,
        default=2e-4,
        help="Learning rate to use for Discriminator (default: %(default)s)",
    )
    parser.add_argument(
        "--G_B1",
        type=float,
        default=0.0,
        help="Beta1 to use for Generator (default: %(default)s)",
    )
    parser.add_argument(
        "--D_B1",
        type=float,
        default=0.0,
        help="Beta1 to use for Discriminator (default: %(default)s)",
    )
    parser.add_argument(
        "--G_B2",
        type=float,
        default=0.999,
        help="Beta2 to use for Generator (default: %(default)s)",
    )
    parser.add_argument(
        "--D_B2",
        type=float,
        default=0.999,
        help="Beta2 to use for Discriminator (default: %(default)s)",
    )

    ### Batch size, parallel, and precision stuff ###
    parser.add_argument(
        "--batch_size",
        type=int,
        default=64,
        help="Default overall batchsize (default: %(default)s)",
    )
    parser.add_argument(
        "--G_batch_size",
        type=int,
        default=0,
        help="Batch size to use for G; if 0, same as D (default: %(default)s)",
    )
    parser.add_argument(
        "--num_G_accumulations",
        type=int,
        default=1,
        help="Number of passes to accumulate G's gradients over "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--num_D_steps",
        type=int,
        default=2,
        help="Number of D steps per G step (default: %(default)s)",
    )
    parser.add_argument(
        "--num_D_accumulations",
        type=int,
        default=1,
        help="Number of passes to accumulate D's gradients over "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--split_D",
        action="store_true",
        default=False,
        help="Run D twice rather than concatenating inputs? (default: %(default)s)",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=100,
        help="Number of epochs to train for (default: %(default)s)",
    )
    parser.add_argument(
        "--parallel",
        action="store_true",
        default=False,
        help="Train with multiple GPUs (default: %(default)s)",
    )
    parser.add_argument(
        "--G_fp16",
        action="store_true",
        default=False,
        help="Train with half-precision in G? (default: %(default)s)",
    )
    parser.add_argument(
        "--D_fp16",
        action="store_true",
        default=False,
        help="Train with half-precision in D? (default: %(default)s)",
    )
    parser.add_argument(
        "--D_mixed_precision",
        action="store_true",
        default=False,
        help="Train with half-precision activations but fp32 params in D? "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--G_mixed_precision",
        action="store_true",
        default=False,
        help="Train with half-precision activations but fp32 params in G? "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--accumulate_stats",
        action="store_true",
        default=False,
        help='Accumulate "standing" batchnorm stats? (default: %(default)s)',
    )
    parser.add_argument(
        "--num_standing_accumulations",
        type=int,
        default=16,
        help="Number of forward passes to use in accumulating standing stats? "
        "(default: %(default)s)",
    )

    ### Bookkeping stuff ###
    parser.add_argument(
        "--slurm_logdir",
        help="Where to save the logs from SLURM",
        required=False,
        default="biggan-training-runs",
        metavar="DIR",
    )

    parser.add_argument(
        "--G_eval_mode",
        action="store_true",
        default=False,
        help="Run G in eval mode (running/standing stats?) at sample/test time? "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--save_every",
        type=int,
        default=2000,
        help="Save every X iterations (default: %(default)s)",
    )
    parser.add_argument(
        "--num_save_copies",
        type=int,
        default=2,
        help="How many copies to save (default: %(default)s)",
    )
    parser.add_argument(
        "--num_best_copies",
        type=int,
        default=2,
        help="How many previous best checkpoints to save (default: %(default)s)",
    )
    parser.add_argument(
        "--which_best",
        type=str,
        default="IS",
        help='Which metric to use to determine when to save new "best" '
        "checkpoints, one of IS or FID (default: %(default)s)",
    )
    parser.add_argument(
        "--no_fid",
        action="store_true",
        default=False,
        help="Calculate IS only, not FID? (default: %(default)s)",
    )
    parser.add_argument(
        "--test_every",
        type=int,
        default=5000,
        help="Test every X iterations (default: %(default)s)",
    )
    parser.add_argument(
        "--num_inception_images",
        type=int,
        default=50000,
        help="Number of samples to compute inception metrics with "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--hashname",
        action="store_true",
        default=False,
        help="Use a hash of the experiment name instead of the full config "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--base_root",
        type=str,
        default="",
        help="Default location to store all weights, samples, data, and logs "
        " (default: %(default)s)",
    )
    parser.add_argument(
        "--data_root",
        type=str,
        default="data",
        help="Default location where data is stored (default: %(default)s)",
    )
    parser.add_argument(
        "--weights_root",
        type=str,
        default="weights",
        help="Default location to store weights (default: %(default)s)",
    )
    parser.add_argument(
        "--logs_root",
        type=str,
        default="logs",
        help="Default location to store logs (default: %(default)s)",
    )
    parser.add_argument(
        "--samples_root",
        type=str,
        default="samples",
        help="Default location to store samples (default: %(default)s)",
    )
    parser.add_argument(
        "--pbar",
        type=str,
        default="mine",
        help='Type of progressbar to use; one of "mine" or "tqdm" '
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--name_suffix",
        type=str,
        default="",
        help="Suffix for experiment name for loading weights for sampling "
        '(consider "best0") (default: %(default)s)',
    )
    parser.add_argument(
        "--experiment_name",
        type=str,
        default="",
        help="Optionally override the automatic experiment naming with this arg. "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--config_from_name",
        action="store_true",
        default=False,
        help="Use a hash of the experiment name instead of the full config "
        "(default: %(default)s)",
    )

    ### EMA Stuff ###
    parser.add_argument(
        "--ema",
        action="store_true",
        default=False,
        help="Keep an ema of G's weights? (default: %(default)s)",
    )
    parser.add_argument(
        "--ema_decay",
        type=float,
        default=0.9999,
        help="EMA decay rate (default: %(default)s)",
    )
    parser.add_argument(
        "--use_ema",
        action="store_true",
        default=False,
        help="Use the EMA parameters of G for evaluation? (default: %(default)s)",
    )
    parser.add_argument(
        "--ema_start",
        type=int,
        default=20000,
        help="When to start updating the EMA weights (default: %(default)s)",
    )

    ### Numerical precision and SV stuff ###
    parser.add_argument(
        "--adam_eps",
        type=float,
        default=1e-6,
        help="epsilon value to use for Adam (default: %(default)s)",
    )
    parser.add_argument(
        "--BN_eps",
        type=float,
        default=1e-5,
        help="epsilon value to use for BatchNorm (default: %(default)s)",
    )
    parser.add_argument(
        "--SN_eps",
        type=float,
        default=1e-6,
        help="epsilon value to use for Spectral Norm (default: %(default)s)",
    )
    parser.add_argument(
        "--num_G_SVs",
        type=int,
        default=1,
        help="Number of SVs to track in G (default: %(default)s)",
    )
    parser.add_argument(
        "--num_D_SVs",
        type=int,
        default=1,
        help="Number of SVs to track in D (default: %(default)s)",
    )
    parser.add_argument(
        "--num_G_SV_itrs",
        type=int,
        default=1,
        help="Number of SV itrs in G (default: %(default)s)",
    )
    parser.add_argument(
        "--num_D_SV_itrs",
        type=int,
        default=1,
        help="Number of SV itrs in D (default: %(default)s)",
    )

    parser.add_argument(
        "--class_cond",
        action="store_true",
        default=False,
        help="Use classes as conditioning",
    )
    parser.add_argument(
        "--constant_conditioning",
        action="store_true",
        default=False,
        help="Use a class-conditioning vector where the input label is always 0?  (default: %(default)s)",
    )

    parser.add_argument(
        "--which_dataset",
        type=str,
        default="imagenet",
        #  choices=['imagenet','coco', 'coco_40k'],
        help="Dataset choice.",
    )

    ### Ortho reg stuff ###
    parser.add_argument(
        "--G_ortho",
        type=float,
        default=0.0,  # 1e-4 is default for BigGAN
        help="Modified ortho reg coefficient in G (default: %(default)s)",
    )
    parser.add_argument(
        "--D_ortho",
        type=float,
        default=0.0,
        help="Modified ortho reg coefficient in D (default: %(default)s)",
    )
    # NOTE(review): action="store_true" with default=True makes this flag a
    # no-op from the CLI; kept as-is for backward compatibility.
    parser.add_argument(
        "--toggle_grads",
        action="store_true",
        default=True,
        help="Toggle D and G's \"requires_grad\" settings when not training them? "
        " (default: %(default)s)",
    )

    ### Which train functions/setup ###
    parser.add_argument(
        "--partition",
        help="Partition name for SLURM",
        required=False,
        default="learnlab",
    )
    parser.add_argument(
        "--which_train_fn",
        type=str,
        default="GAN",
        help="How2trainyourbois (default: %(default)s)",
    )
    parser.add_argument(
        "--run_setup",
        type=str,
        default="slurm",
        help="If local_debug or slurm (default: %(default)s)",
    )
    parser.add_argument(
        "--ddp_train",
        action="store_true",
        default=False,
        help="If use DDP for training",
    )
    parser.add_argument(
        "--n_nodes",
        type=int,
        default=1,
        help="Number of nodes for ddp (default: %(default)s)",
    )
    parser.add_argument(
        "--n_gpus_per_node",
        type=int,
        default=1,
        help="Number of gpus per node for ddp (default: %(default)s)",
    )
    parser.add_argument(
        "--stop_when_diverge",
        action="store_true",
        default=False,
        help="Stop the experiment if there is signs of divergence. "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--es_patience", type=int, default=50, help="Epochs for early stopping patience"
    )
    parser.add_argument(
        "--deterministic_run",
        action="store_true",
        default=False,
        help="Set deterministic cudnn and set the seed at each epoch "
        "(default: %(default)s)",
    )

    ### Testing parameters ###
    parser.add_argument(
        "--eval_prdc",
        action="store_true",
        default=False,
        help="(Eval) Evaluate prdc " " (default: %(default)s)",
    )
    parser.add_argument(
        "--eval_reference_set",
        type=str,
        default="train",
        help="(Eval) Reference dataset to use for FID computation (default: %(default)s)",
    )

    ### Resume training stuff
    parser.add_argument(
        "--load_weights",
        type=str,
        default="",
        help="Suffix for which weights to load (e.g. best0, copy0) "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--resume",
        action="store_true",
        default=False,
        help="Resume training? (default: %(default)s)",
    )

    ### Log stuff ###
    parser.add_argument(
        "--logstyle",
        type=str,
        default="%3.3e",
        help="What style to use when logging training metrics?"
        "One of: %#.#f/ %#.#e (float/exp, text),"
        "pickle (python pickle),"
        "npz (numpy zip),"
        "mat (MATLAB .mat file) (default: %(default)s)",
    )
    parser.add_argument(
        "--log_G_spectra",
        action="store_true",
        default=False,
        help="Log the top 3 singular values in each SN layer in G? "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--log_D_spectra",
        action="store_true",
        default=False,
        help="Log the top 3 singular values in each SN layer in D? "
        "(default: %(default)s)",
    )
    parser.add_argument(
        "--sv_log_interval",
        type=int,
        default=10,
        help="Iteration interval for logging singular values "
        " (default: %(default)s)",
    )

    return parser