BigGAN_PyTorch/BigGAN.py [266:295]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            ]

            # If attention on this block, attach it to the end
            if self.arch["attention"][self.arch["resolution"][index]]:
                print(
                    "Adding attention layer in G at resolution %d"
                    % self.arch["resolution"][index]
                )
                self.blocks[-1] += [
                    layers.Attention(self.arch["out_channels"][index], self.which_conv)
                ]

        # Turn self.blocks into a ModuleList so that it's all properly registered.
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])

        # output layer: batchnorm-relu-conv.
        # Consider using a non-spectral conv here
        self.output_layer = nn.Sequential(
            layers.bn(
                self.arch["out_channels"][-1],
                cross_replica=self.cross_replica,
                mybn=self.mybn,
            ),
            self.activation,
            self.which_conv(self.arch["out_channels"][-1], 3),
        )

        # Initialize weights. Optionally skip init for testing.
        if not skip_init:
            self.init_weights()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
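For context, the attention check in this excerpt is keyed by output resolution: self.arch["attention"] maps each resolution the generator passes through to a boolean, so a block list only gets a non-local Attention layer when its output resolution was requested. A minimal sketch of how such a table could be built from a user-supplied string like "64" follows; the function name and arguments are illustrative, not taken from the files above.

# Hypothetical sketch of a resolution-keyed attention table, shaped like the
# one consumed by `self.arch["attention"][self.arch["resolution"][index]]`.
def build_attention_table(attention="64", min_res=8, max_res=128):
    requested = {int(item) for item in attention.split("_")}
    # One boolean per power-of-two resolution between min_res and max_res.
    return {res: (res in requested)
            for res in (2 ** i for i in range(3, 8))
            if min_res <= res <= max_res}

print(build_attention_table("64"))
# {8: False, 16: False, 32: False, 64: True, 128: False}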



BigGAN_PyTorch/BigGANdeep.py [288:317]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            ]

            # If attention on this block, attach it to the end
            if self.arch["attention"][self.arch["resolution"][index]]:
                print(
                    "Adding attention layer in G at resolution %d"
                    % self.arch["resolution"][index]
                )
                self.blocks[-1] += [
                    layers.Attention(self.arch["out_channels"][index], self.which_conv)
                ]

        # Turn self.blocks into a ModuleList so that it's all properly registered.
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])

        # output layer: batchnorm-relu-conv.
        # Consider using a non-spectral conv here
        self.output_layer = nn.Sequential(
            layers.bn(
                self.arch["out_channels"][-1],
                cross_replica=self.cross_replica,
                mybn=self.mybn,
            ),
            self.activation,
            self.which_conv(self.arch["out_channels"][-1], 3),
        )

        # Initialize weights. Optionally skip init for testing.
        if not skip_init:
            self.init_weights()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
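Downstream of this constructor code, the nested ModuleList is typically walked block list by block list, with each list receiving its own slice of the conditioning before the batchnorm-relu-conv output layer maps the final feature map to a 3-channel image. A rough sketch of that traversal, assuming a `ys` list of per-block conditioning vectors; the method name is illustrative and the loop is a sketch of how such a structure could be consumed, not a copy of the files above.

import torch

def generator_tail(self, h, ys):
    # Walk each per-resolution block list; an appended attention layer can
    # simply ignore the conditioning vector if it accepts one.
    for index, blocklist in enumerate(self.blocks):
        for block in blocklist:
            h = block(h, ys[index])
    # BN -> activation -> 3-channel conv, squashed to [-1, 1].
    return torch.tanh(self.output_layer(h))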



