def __init__()

in utils/gluon/utils/resnetv2.py [0:0]
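
Builds the `_ResNetV2` network: an input stem (a single 7x7 stride-2 convolution, or three stacked 3x3 convolutions when `deep_stem=True`), four residual stages created by `_make_layer`, a pre-activation BN + ReLU tail, and a classification head (global average pooling, optional dropout, a 1x1 convolutional classifier, and a flatten). The code assumes `from mxnet import gluon` at module level.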


    def __init__(self, block, layers, groups=1, multiplier=1.,
                 ratio=(0., 0., 0., 0.),
                 num_out=(256, 512, 1024, 2048),
                 num_mid=( 64, 128,  256,  512),
                 classes=1000, use_se=False, down_pos=0,
                 norm_kwargs=None, last_gamma=False, deep_stem=False,
                 final_drop=0., use_global_stats=False,
                 name_prefix='', **kwargs):
        super(_ResNetV2, self).__init__(prefix=name_prefix)
        assert not last_gamma, "last_gamma should be False for ResNetV2"
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        if use_global_stats:
            norm_kwargs['use_global_stats'] = True
        # initialize residual networks
        k = multiplier
        self.use_se = use_se
        self.groups = groups
        self.down_pos = down_pos
        self.norm_kwargs = norm_kwargs

        with self.name_scope():
            self.conv1 = gluon.nn.HybridSequential()
            if not deep_stem:
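                # classic stem: a single 7x7 stride-2 convolution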
                self.conv1.add(gluon.nn.Conv2D(channels=int(k*64), kernel_size=7, padding=3, strides=2,
                                         use_bias=False, prefix='conv1_'))
                self.conv1.add(gluon.nn.BatchNorm(prefix='bn1_', **norm_kwargs))
                self.conv1.add(gluon.nn.Activation('relu'))
            else:
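                # deep stem: three 3x3 convolutions (k*32, k*32, k*64), only the first with stride 2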
                self.conv1.add(gluon.nn.Conv2D(channels=int(k*32), kernel_size=3, padding=1, strides=2,
                                         use_bias=False, prefix='stem_conv1_'))
                self.conv1.add(gluon.nn.BatchNorm(prefix='stem_bn1_', **norm_kwargs))
                self.conv1.add(gluon.nn.Activation('relu'))
                self.conv1.add(gluon.nn.Conv2D(channels=int(k*32), kernel_size=3, padding=1, strides=1,
                                         use_bias=False, prefix='stem_conv2_'))
                self.conv1.add(gluon.nn.BatchNorm(prefix='stem_bn2_', **norm_kwargs))
                self.conv1.add(gluon.nn.Activation('relu'))
                self.conv1.add(gluon.nn.Conv2D(channels=int(k*64), kernel_size=3, padding=1, strides=1,
                                         use_bias=False, prefix='stem_conv3_'))
                self.conv1.add(gluon.nn.BatchNorm(prefix='stem_bn3_', **norm_kwargs))
                self.conv1.add(gluon.nn.Activation('relu'))
            # ------------------------------------------------------------------
            self.maxpool = gluon.nn.MaxPool2D(pool_size=3, strides=2, padding=1)
            # ------------------------------------------------------------------
            # customized convolution starts from this line
            self.inplanes = (int(k*64), -1) # convert to the proposed (high-, low-frequency) channel pair; -1 marks an absent low-frequency branch
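            # four residual stages; stages 2-4 halve the spatial resolution (strides=2)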
            self._make_layer(1, block, layers[0], int(k*num_out[0]), num_mid[0], ratio[0])
            self._make_layer(2, block, layers[1], int(k*num_out[1]), num_mid[1], ratio[1], strides=2)
            self._make_layer(3, block, layers[2], int(k*num_out[2]), num_mid[2], ratio[2], strides=2)
            self._make_layer(4, block, layers[3], int(k*num_out[3]), num_mid[3], ratio[3], strides=2)
            # ------------------------------------------------------------------
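            # pre-activation (v2) design: stages end without BN/ReLU, so apply them once here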
            self.tail = gluon.nn.HybridSequential()
            self.tail.add(gluon.nn.BatchNorm(prefix='tail-bn_', **norm_kwargs))
            self.tail.add(gluon.nn.Activation('relu'))
            # ------------------------------------------------------------------
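            # head: global average pooling, optional dropout, 1x1-conv classifier, flatten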
            self.avgpool = gluon.nn.GlobalAvgPool2D()
            self.drop = gluon.nn.Dropout(final_drop) if final_drop > 0. else (lambda x: x)
            self.classifer = gluon.nn.Conv2D(in_channels=int(k*num_out[3]), channels=classes,
                                       kernel_size=1, prefix='classifier_')
            self.flat = gluon.nn.Flatten()
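
For reference, a minimal instantiation sketch. `Bottleneck` is a placeholder for whatever residual block class the surrounding module actually defines (not shown in this excerpt), and the (3, 4, 6, 3) layer counts follow the standard ResNet-50 layout; treat both as assumptions, not the module's documented API.

    from mxnet import gluon, init

    # `Bottleneck` is hypothetical; substitute the block class from resnetv2.py
    net = _ResNetV2(block=Bottleneck, layers=(3, 4, 6, 3),
                    classes=1000, deep_stem=True, final_drop=0.2,
                    name_prefix='resnetv2_50_')
    net.initialize(init.Xavier())
    net.hybridize()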