BigGAN_PyTorch/BigGAN.py [470:512]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.resolution = resolution
        # Kernel size
        self.kernel_size = D_kernel_size
        # Resolutions at which to apply attention
        self.attention = D_attn
        # Number of classes
        self.n_classes = n_classes
        # Activation
        self.activation = D_activation
        # Initialization style
        self.init = D_init
        # Parameterization style
        self.D_param = D_param
        # Epsilon for spectral norm
        self.SN_eps = SN_eps
        # Whether to run in fp16
        self.fp16 = D_fp16
        # Architecture
        self.arch = D_arch(self.ch, self.attention)[resolution]

        # Which conv, linear, and embedding layers to use
        # No option to turn off SN in D right now
        if self.D_param == "SN":
            self.which_conv = functools.partial(
                layers.SNConv2d,
                kernel_size=3,
                padding=1,
                num_svs=num_D_SVs,
                num_itrs=num_D_SV_itrs,
                eps=self.SN_eps,
            )
            self.which_linear = functools.partial(
                layers.SNLinear,
                num_svs=num_D_SVs,
                num_itrs=num_D_SV_itrs,
                eps=self.SN_eps,
            )
            self.which_embedding = functools.partial(
                layers.SNEmbedding,
                num_svs=num_D_SVs,
                num_itrs=num_D_SV_itrs,
                eps=self.SN_eps,
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



BigGAN_PyTorch/BigGANdeep.py [534:576]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.resolution = resolution
        # Kernel size
        self.kernel_size = D_kernel_size
        # Resolutions at which to apply attention
        self.attention = D_attn
        # Number of classes
        self.n_classes = n_classes
        # Activation
        self.activation = D_activation
        # Initialization style
        self.init = D_init
        # Parameterization style
        self.D_param = D_param
        # Epsilon for spectral norm
        self.SN_eps = SN_eps
        # Whether to run in fp16
        self.fp16 = D_fp16
        # Architecture
        self.arch = D_arch(self.ch, self.attention)[resolution]

        # Which conv, linear, and embedding layers to use
        # No option to turn off SN in D right now
        if self.D_param == "SN":
            self.which_conv = functools.partial(
                layers.SNConv2d,
                kernel_size=3,
                padding=1,
                num_svs=num_D_SVs,
                num_itrs=num_D_SV_itrs,
                eps=self.SN_eps,
            )
            self.which_linear = functools.partial(
                layers.SNLinear,
                num_svs=num_D_SVs,
                num_itrs=num_D_SV_itrs,
                eps=self.SN_eps,
            )
            self.which_embedding = functools.partial(
                layers.SNEmbedding,
                num_svs=num_D_SVs,
                num_itrs=num_D_SV_itrs,
                eps=self.SN_eps,
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



