def add_train_parameters()

in options/train_options.py [0:0]


    def add_train_parameters(self):
        training = self.parser.add_argument_group("training")
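        # Data loading, optimiser, and initialisation hyper-parameters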
        training.add_argument("--num_workers", type=int, default=0)
        training.add_argument("--start-epoch", type=int, default=0)
        training.add_argument("--num-accumulations", type=int, default=1)
        training.add_argument("--lr", type=float, default=1e-3)
        training.add_argument("--lr_d", type=float, default=1e-3 * 2)
        training.add_argument("--lr_g", type=float, default=1e-3 / 2)
        training.add_argument("--momentum", type=float, default=0.9)
        training.add_argument("--beta1", type=float, default=0)
        training.add_argument("--beta2", type=float, default=0.9)
        training.add_argument("--seed", type=int, default=0)
        training.add_argument("--init", type=str, default="")

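        # Model choices (multi-hypothesis sampling, noise dim, discriminator) and epoch schedule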
        training.add_argument(
            "--use_multi_hypothesis", action="store_true", default=False
        )
        training.add_argument("--num_hypothesis", type=int, default=1)
        training.add_argument("--z_dim", type=int, default=128)
        training.add_argument(
            "--netD", type=str, default="multiscale", help="(multiscale)"
        )
        training.add_argument(
            "--niter",
            type=int,
            default=100,
            help="# of iter at starting learning rate. This is NOT the total #epochs."
            + " Total #epochs is niter + niter_decay",
        )
        training.add_argument(
            "--niter_decay",
            type=int,
            default=10,
            help="# of iter at starting learning rate. This is NOT the total #epochs."
            + " Totla #epochs is niter + niter_decay",
        )

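        # Loss configuration (reconstruction losses, discriminator loss, GAN mode)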
        training.add_argument(
            "--losses", type=str, nargs="+", default=['1.0_l1','10.0_content']
        )
        training.add_argument(
            "--discriminator_losses",
            type=str,
            default="pix2pixHD",
            help="(|pix2pixHD|progressive)",
        )
        training.add_argument(
            "--lambda_feat",
            type=float,
            default=10.0,
            help="weight for feature matching loss",
        )
        training.add_argument(
            "--gan_mode", type=str, default="hinge", help="(ls|original|hinge)"
        )

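        # Loading pretrained generator / depth checkpoints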
        training.add_argument(
            "--load-old-model", action="store_true", default=False
        )
        training.add_argument(
            "--load-old-depth-model", action="store_true", default=False
        )
        training.add_argument("--old_model", type=str, default="")
        training.add_argument("--old_depth_model", type=str, default="")

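        # Flags to disable auxiliary losses and to resume training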
        training.add_argument(
            "--no_ganFeat_loss",
            action="store_true",
            help="if specified, do *not* use discriminator feature matching loss",
        )
        training.add_argument(
            "--no_vgg_loss",
            action="store_true",
            help="if specified, do *not* use VGG feature matching loss",
        )
        training.add_argument("--resume", action="store_true", default=False)

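        # Logging, checkpoint/output paths, batching, and device configuration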
        training.add_argument(
            "--log-dir",
            type=str,
            default="/checkpoint/ow045820/logging/viewsynthesis3d/%s/",
        )

        training.add_argument("--batch-size", type=int, default=16)
        training.add_argument("--continue_epoch", type=int, default=0)
        training.add_argument("--max_epoch", type=int, default=500)
        training.add_argument("--folder_to_save", type=str, default="outpaint")
        training.add_argument(
            "--model-epoch-path",
            type=str,
            default="/%s/%s/models/lr%0.5f_bs%d_model%s_spl%s/noise%s_bn%s_ref%s_d%s_"
            + "camxys%s/_init%s_data%s_seed%d/_multi%s_losses%s_i%s_%s_vol_gan%s/",
        )
        training.add_argument(
            "--run-dir",
            type=str,
            default="/%s/%s/runs/lr%0.5f_bs%d_model%s_spl%s/noise%s_bn%s_ref%s_d%s_"
            + "camxys%s/_init%s_data%s_seed%d/_multi%s_losses%s_i%s_%s_vol_gan%s/",
        )
        training.add_argument("--suffix", type=str, default="")
        training.add_argument(
            "--render_ids", type=int, nargs="+", default=[0, 1]
        )
        training.add_argument("--gpu_ids", type=str, default="0")