def __init__()

in MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py [0:0]


    def __init__(self, cfg):
        super(FPNXconv1fcFeatureExtractor, self).__init__()

        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        self.pooler = pooler
        
        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        use_gw = cfg.MODEL.ROI_BOX_HEAD.USE_GW

        in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
        conv_head_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM
        num_stacked_convs = cfg.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS
        dilation = cfg.MODEL.ROI_BOX_HEAD.DILATION


        # Normalization strategy: either each Deconv/fc layer normalizes
        # internally (norm_type), or a single LayerNorm is kept as self.box_norm.
        if cfg.MODEL.DECONV.LAYERWISE_NORM:
            norm_type = cfg.MODEL.DECONV.BOX_NORM_TYPE
        else:
            norm_type = 'none'
            if cfg.MODEL.DECONV.BOX_NORM_TYPE == 'layernorm':
                self.box_norm = LayerNorm(eps=cfg.MODEL.DECONV.EPS)


        # Stack of 3x3 convs: Deconv blocks when USE_DECONV is set, otherwise
        # plain Conv2d layers with optional group norm / group whitening.
        xconvs = []
        for ix in range(num_stacked_convs):
            if cfg.MODEL.ROI_BOX_HEAD.USE_DECONV:
                xconvs.append(
                    Deconv(
                        in_channels,
                        conv_head_dim,
                        kernel_size=3,
                        stride=1,
                        padding=dilation,
                        dilation=dilation,
                        bias=True,
                        block=cfg.MODEL.DECONV.BLOCK,
                        sampling_stride=cfg.MODEL.DECONV.STRIDE,
                        sync=cfg.MODEL.DECONV.SYNC,
                        norm_type=norm_type,
                    )
                )
                in_channels = conv_head_dim
            else:
                xconvs.append(
                    nn.Conv2d(
                        in_channels,
                        conv_head_dim,
                        kernel_size=3,
                        stride=1,
                        padding=dilation,
                        dilation=dilation,
                        # no conv bias when a norm layer follows
                        bias=not (use_gn or use_gw),
                    )
                )
                in_channels = conv_head_dim
                if use_gn or use_gw:
                    xconvs.append(group_norm(in_channels))

            xconvs.append(nn.ReLU(inplace=True))

        self.add_module("xconvs", nn.Sequential(*xconvs))
        # Small normal init for conv/Deconv weights; zero the biases when
        # GN/GW is not used (with GN/GW the plain convs are created bias-free).
        for modules in [self.xconvs]:
            for l in modules.modules():
                if isinstance(l, (nn.Conv2d, Deconv)):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    if not (use_gn or use_gw):
                        torch.nn.init.constant_(l.bias, 0)

        input_size = conv_head_dim * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM

        block = 0
        use_delinear = cfg.MODEL.ROI_BOX_HEAD.USE_DECONV
        if use_delinear:
            block = cfg.MODEL.DECONV.BLOCK_FC  # check here

        self.fc6 = make_fc(
            input_size,
            representation_size,
            use_gn=False,
            use_gw=False,
            use_delinear=use_delinear,
            block=block,
            sync=cfg.MODEL.DECONV.SYNC,
            norm_type=norm_type,
        )
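
For orientation, a minimal usage sketch follows. It assumes a yacs-style cfg as in maskrcnn_benchmark whose defaults already define the MODEL.DECONV and MODEL.ROI_BOX_HEAD options read above; the config file name is a placeholder, not a file taken from the repo.

    from maskrcnn_benchmark.config import cfg

    cfg.merge_from_file("configs/box_head_deconv.yaml")  # placeholder config path
    extractor = FPNXconv1fcFeatureExtractor(cfg)
    # The forward pass (not shown here) is expected to pool per-proposal
    # features with self.pooler, run them through self.xconvs, flatten, and
    # apply self.fc6.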