def __init__()

in siammot/modelling/backbone/dla.py [0:0]


    def __init__(self, levels, channels, num_classes=1000, in_chans=3, cardinality=1, base_width=64,
                 block=DlaBottle2neck, residual_root=False, linear_root=False, batch_norm=FrozenBatchNorm2d,
                 drop_rate=0.0, global_pool='avg', feature_only=True, dcn_config=(False,) * 6):
        """Build the DLA backbone: a conv stem, two plain conv levels, and
        four hierarchical aggregation trees, plus an optional classifier head.

        Args:
            levels: per-level block counts; indices 0-5 are consumed below.
            channels: per-level channel widths; indices 0-5 are consumed below.
            num_classes: classifier output size (head only built when
                ``feature_only`` is False).
            in_chans: number of input image channels for the stem conv.
            cardinality, base_width: grouping parameters forwarded to every
                ``DlaTree``.
            block: residual block class used inside the trees.
            residual_root: forwarded to ``DlaTree`` as ``root_residual``.
            linear_root: accepted for signature compatibility; not read here.
            batch_norm: normalization layer factory (frozen BN by default).
            drop_rate: stored on the instance; not applied in this method.
            global_pool: pooling type for the optional classifier head.
            feature_only: when True (default), skip the pooling/FC head.
            dcn_config: 6 booleans, one per level, enabling deformable conv in
                the matching ``DlaTree`` (entries 0/1 are never read, since
                levels 0/1 are plain conv levels).

        Raises:
            ValueError: if ``dcn_config`` does not have exactly 6 entries.
        """
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        self.cardinality = cardinality
        self.base_width = base_width
        self.drop_rate = drop_rate

        # One deformable-conv flag is required per level (6 levels total).
        # FIX: message typo "specifcy" corrected; the old default
        # dcn_config=(False,) had length 1, so the default argument always
        # tripped this check -- it is now (False,) * 6, making the no-DCN
        # default actually usable (backward-compatible: the old default
        # could only raise, so no working caller depended on it).
        if len(dcn_config) != 6:
            raise ValueError("Deformable configuration is not correct, "
                             "every level should specify a configuration.")

        # Stem: 7x7 stride-1 conv preserves spatial size, then BN + ReLU.
        self.base_layer = nn.Sequential(
            Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),
            batch_norm(channels[0]),
            nn.ReLU(inplace=True))
        # Levels 0/1 are plain conv stacks; level 1 downsamples by 2.
        self.level0 = self._make_conv_level(channels[0], channels[0], levels[0], batch_norm=batch_norm)
        self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2, batch_norm=batch_norm)
        # Levels 2-5 are aggregation trees, each downsampling by a stride of 2;
        # level 2 is the only one without a level root.
        cargs = dict(cardinality=cardinality, base_width=base_width, root_residual=residual_root, batch_norm=batch_norm)
        self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False,
                              with_dcn=dcn_config[2], **cargs)
        self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True,
                              with_dcn=dcn_config[3], **cargs)
        self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True,
                              with_dcn=dcn_config[4], **cargs)
        self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True,
                              with_dcn=dcn_config[5], **cargs)

        # Optional classification head: global pool + 1x1 conv classifier.
        if not feature_only:
            self.num_features = channels[-1]
            self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
            self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)