def add_model_parameters()

in options/train_options.py [0:0]


    def add_model_parameters(self):
        """Register all model-architecture command-line options.

        Adds a "model" argument group to ``self.parser`` and populates it
        with architecture, splatting/z-buffer, and generator/discriminator
        hyper-parameters. The parser is mutated in place; nothing is
        returned.
        """
        model_params = self.parser.add_argument_group("model")

        # --- top-level architecture selection ---
        model_params.add_argument(
            "--model_type",
            type=str,
            default="zbuffer_pts",
            choices=(
                "zbuffer_pts",
                "deepvoxels",
                "viewappearance",
                "tatarchenko",
            ),
            help='Model to be used.'
        )
        model_params.add_argument(
            "--refine_model_type", type=str, default="unet",
            help="Model to be used for the refinement network and the feature encoder."
        )
        model_params.add_argument(
            "--accumulation",
            type=str,
            default="wsum",
            choices=("wsum", "wsumnorm", "alphacomposite"),
            help="Method for accumulating points in the z-buffer. Three choices: wsum (weighted sum), wsumnorm (normalised weighted sum), alpha composite (alpha compositing)"
        )

        model_params.add_argument(
            "--depth_predictor_type",
            type=str,
            default="unet",
            choices=("unet", "hourglass", "true_hourglass"),
            help='Model for predicting depth'
        )
        model_params.add_argument(
            "--splatter",
            type=str,
            default="xyblending",
            # BUGFIX: was choices=("xyblending") — a bare string, so argparse's
            # membership test accepted any substring (e.g. "xy"). Must be a
            # 1-tuple for exact-match validation.
            choices=("xyblending",),
            help="Point-splatting method used when projecting into the z-buffer.",
        )

        # --- z-buffer / point-projection parameters ---
        model_params.add_argument("--rad_pow", type=int, default=2,
            help='Exponent to raise the radius to when computing distance (default is euclidean, when rad_pow=2). ')
        model_params.add_argument("--num_views", type=int, default=2,
            help='Number of views considered per input image (including input), we only use num_views=2 (1 target view).')
        model_params.add_argument(
            "--crop_size",
            type=int,
            default=256,
            help="Crop to the width of crop_size (after initially scaling the images to load_size.)",
        )
        model_params.add_argument(
            "--aspect_ratio",
            type=float,
            default=1.0,
            help="The ratio width/height. The final height of the load image will be crop_size/aspect_ratio",
        )
        model_params.add_argument(
            "--norm_D",
            type=str,
            default="spectralinstance",
            help="instance normalization or batch normalization",
        )
        model_params.add_argument(
            "--noise", type=str, default="", choices=("style", "")
        )
        # NOTE(review): store_true with default=True means this flag can never
        # be disabled from the command line; kept as-is to preserve the
        # existing interface — consider a store_false companion flag instead.
        model_params.add_argument(
            "--learn_default_feature", action="store_true", default=True
        )
        model_params.add_argument(
            "--use_camera", action="store_true", default=False
        )

        model_params.add_argument("--pp_pixel", type=int, default=128,
            help='K: the number of points to consider in the z-buffer.'
        )
        model_params.add_argument("--tau", type=float, default=1.0,
            help='gamma: the power to raise the distance to.'
        )
        model_params.add_argument(
            "--use_gt_depth", action="store_true", default=False
        )
        model_params.add_argument(
            "--train_depth", action="store_true", default=False
        )
        model_params.add_argument(
            "--only_high_res", action="store_true", default=False
        )
        model_params.add_argument(
            "--use_inverse_depth", action="store_true", default=False,
            help='If true the depth is sampled as a long tail distribution, else the depth is sampled uniformly. Set to true if the dataset has points that are very far away (e.g. a dataset with landscape images, such as KITTI).'
        )

        # --- generator / discriminator hyper-parameters ---
        model_params.add_argument(
            "--ndf",
            type=int,
            default=64,
            help="# of discrim filters in first conv layer",
        )
        model_params.add_argument(
            "--use_xys", action="store_true", default=False
        )
        model_params.add_argument(
            "--output_nc",
            type=int,
            default=3,
            help="# of output image channels",
        )
        model_params.add_argument("--norm_G", type=str, default="batch")
        model_params.add_argument(
            "--ngf",
            type=int,
            default=64,
            help="# of gen filters in first conv layer",
        )
        model_params.add_argument(
            "--radius",
            type=float,
            default=4,
            help="Radius of points to project",
        )
        model_params.add_argument(
            "--voxel_size", type=int, default=64, help="Size of latent voxels"
        )
        model_params.add_argument(
            "--num_upsampling_layers",
            choices=("normal", "more", "most"),
            default="normal",
            help="If 'more', adds upsampling layer between the two middle resnet blocks. "
            + "If 'most', also add one more upsampling + resnet layer at the end of the generator",
        )