def __init__()

in evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/gaussian_net/gaussian_net.py

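Context: GauHead is the prediction head of the Gaussian Net. The constructor below relies on the usual module-level imports in gaussian_net.py (a sketch of what it assumes, not copied from the file):

import math
import torch
from torch import nn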

    def __init__(self, cfg, in_channels):
        super(GauHead, self).__init__()
        num_classes = cfg.MODEL.GAU.NUM_CLASSES - 1

        # classification and box-regression towers: NUM_CONVS blocks of conv3x3 -> GroupNorm(32) -> ReLU
        cls_tower = []
        bbox_tower = []
        for i in range(cfg.MODEL.GAU.NUM_CONVS):
            cls_tower.append(
                nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            cls_tower.append(nn.GroupNorm(32, in_channels))
            cls_tower.append(nn.ReLU())
            bbox_tower.append(
                nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            bbox_tower.append(nn.GroupNorm(32, in_channels))
            bbox_tower.append(nn.ReLU())

        self.add_module('cls_tower', nn.Sequential(*cls_tower))
        self.add_module('bbox_tower', nn.Sequential(*bbox_tower))

        # cls_logits
        self.cls_logits = nn.Conv2d(
            in_channels, num_classes, kernel_size=3, stride=1, padding=1
        )
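        # optionally add a second per-class logit map (the Gaussian branch)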
        self.use_more_logits = True
        if self.use_more_logits:
            self.gau_logits = nn.Conv2d(
                in_channels, num_classes, kernel_size=3, stride=1, padding=1
            )
            # self.dif_logits = nn.Conv2d(
            #     in_channels, num_classes, kernel_size=3, stride=1, padding=1
            # )

        # upsample towers (disabled; kept below for reference)
        # up_towers = []
        # for i, stride in enumerate(cfg.MODEL.GAU.FPN_STRIDES):
        #     # up_tower = []
        #     # for _ in range(int(math.log2(stride)) - 2):
        #     #     up_tower.append(nn.UpsamplingBilinear2d(scale_factor=2))
        #     #     up_tower.append(
        #     #         nn.Conv2d(
        #     #             num_classes,
        #     #             num_classes,
        #     #             3, 1, 1, groups=num_classes
        #     #         )
        #     #     )
        #     #     up_tower.append(nn.ReLU())
        #     #     # up_tower.append(
        #     #     #     nn.ConvTranspose2d(
        #     #     #         num_classes,
        #     #     #         num_classes,
        #     #     #         kernel_size=3,
        #     #     #         stride=2,
        #     #     #         padding=1,
        #     #     #         output_padding=1,
        #     #     #         groups=num_classes
        #     #     #     )
        #     #     # )
        #     #
        #     #     up_tower.append(
        #     #         nn.Conv2d(
        #     #             num_classes,
        #     #             num_classes,
        #     #             3, 1, 1, groups=num_classes
        #     #         )
        #     #     )
        #     up_tower = [nn.UpsamplingBilinear2d(scale_factor=2**(int(math.log2(stride))-2)),
        #                 nn.Conv2d(num_classes, num_classes, 3, 1, 1, groups=num_classes)]
        #     up_towers.append(nn.Sequential(*up_tower))
        # self.up_towers = nn.ModuleList(up_towers)

        # initialize conv weights in the towers and prediction heads
        init_modules = [self.cls_tower, self.bbox_tower, self.cls_logits]
        if self.use_more_logits:
            init_modules.append(self.gau_logits)
        for modules in init_modules:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
                elif isinstance(l, nn.ConvTranspose2d):
                    torch.nn.init.constant_(l.weight, 1.0/9)
                    torch.nn.init.constant_(l.bias, 0)

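        # focal-loss style bias init: sigmoid(bias_value) equals PRIOR_PROB, so initial foreground scores start near the prior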
        prior_prob = cfg.MODEL.GAU.PRIOR_PROB
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        # for up_tower in self.up_towers:
        #     torch.nn.init.constant_(up_tower[-1].weight, 0)
        #     torch.nn.init.constant_(up_tower[-1].bias, bias_value)
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)
        if self.use_more_logits:
            torch.nn.init.constant_(self.gau_logits.bias, bias_value)
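
A minimal smoke-test sketch (not part of the repo): it assumes GauHead is importable from gaussian_net.py and substitutes a plain SimpleNamespace for the yacs cfg that maskrcnn_benchmark normally supplies; the values below (81 classes, 4 tower convs, 0.01 prior) are placeholders, not the project's configuration.

from types import SimpleNamespace

import torch

# assumed import path for the excerpted file
from maskrcnn_benchmark.modeling.rpn.gaussian_net.gaussian_net import GauHead

# stand-in for the yacs cfg; only the fields read by __init__ are filled in
cfg = SimpleNamespace(MODEL=SimpleNamespace(GAU=SimpleNamespace(
    NUM_CLASSES=81,   # includes background, so the head predicts 80 classes
    NUM_CONVS=4,
    PRIOR_PROB=0.01,
)))

head = GauHead(cfg, in_channels=256)
feat = torch.randn(2, 256, 50, 68)               # one FPN feature level
cls = head.cls_logits(head.cls_tower(feat))      # -> (2, 80, 50, 68)
gau = head.gau_logits(head.cls_tower(feat))      # -> (2, 80, 50, 68)
print(cls.shape, gau.shape)
print(torch.sigmoid(head.cls_logits.bias)[0])    # ~0.01, the configured prior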