in utils/gluon/utils/mobilenetv2.py [0:0]
def __init__(self, multiplier=1.0, classes=1000, ratio=0.,
norm_kwargs=None, final_drop=0., last_gamma=False,
name_prefix=None, **kwargs):
super(_MobileNetV2, self).__init__(prefix=name_prefix)
# reference:
# - Sandler, Mark, et al.
# "MobileNetV2: Inverted Residuals and Linear Bottlenecks."
# arXiv preprint arXiv:1801.04381 (2018).
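# Per-block settings for the 17 inverted-residual units, expanded from the
# (expansion t, channels c, repeats n, stride s) table of the MobileNetV2
# paper into flat per-block lists and scaled by the width multiplier.
# `in_ratios`/`ratios` apply the channel-split `ratio` to the middle blocks
# only and 0. (no split) to the first and last ones.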
in_channels = [int(multiplier * x) for x in
[32] + [16] + [24] * 2 + [32] * 3 + [64] * 4 + [96] * 3 + [160] * 3]
mid_channels = [int(t * x) for t, x in zip([1] + [6] * 16, in_channels)]
out_channels = [int(multiplier * x) for x in
[16] + [24] * 2 + [32] * 3 + [64] * 4 + [96] * 3 + [160] * 3 + [320]]
strides = [1, 2] * 2 + [1, 1, 2] + [1] * 6 + [2] + [1] * 3
in_ratios = [0.] + [ratio] * 13 + [0.] * 3
ratios = [ratio] * 13 + [0.] * 4
last_channels = int(1280 * multiplier) if multiplier > 1.0 else 1280
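# Overall layout: 3x3 stride-2 stem -> 17 bottleneck blocks -> 1x1 "tail"
# conv -> global pool, dropout and 1x1 classifier. The head width is floored
# at 1280 channels: the multiplier only widens it, never narrows it.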
with self.name_scope():
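# Stem: 3x3 stride-2 convolution + BatchNorm + ReLU6, halving the input resolution.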
self.conv1 = gluon.nn.HybridSequential()
self.conv1.add(gluon.nn.Conv2D(channels=int(32 * multiplier),
kernel_size=3, padding=1, strides=2, use_bias=False,
prefix='conv1_'))
self.conv1.add(gluon.nn.BatchNorm(prefix='bn1_',
**({} if norm_kwargs is None else norm_kwargs)))
self.conv1.add(RELU6())
# ------------------------------------------------------------------
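# Build the 17 bottleneck blocks. Each one is registered as an attribute
# named 'L<stage>_B<block>'; a new stage starts whenever a block has
# stride 2, i.e. at every spatial down-sampling step.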
stage_index, i = 1, 0
for k, (in_c, mid_c, out_c, s, ir, r) in enumerate(zip(in_channels, mid_channels, out_channels, strides, in_ratios, ratios)):
stage_index += 1 if s > 1 else 0
i = 0 if s > 1 else (i + 1)
name = 'L%d_B%d' % (stage_index, i)
# -------------------------------------
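# Channel arguments are passed as (channels, split) pairs: -1 appears to
# mean "no split", while self._get_channles (defined elsewhere in this
# class) presumably derives the split of the expanded mid channels from
# the ratio r.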
in_c = (in_c, -1)
mid_c = self._get_channles(mid_c, r)
out_c = (out_c, -1)
# -------------------------------------
setattr(self, name, _BottleneckV1(in_c, mid_c, out_c,
strides=s,
norm_kwargs=norm_kwargs,
last_gamma=last_gamma,
name_prefix="%s_" % name))
# ------------------------------------------------------------------
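# Tail: 1x1 convolution expanding the last bottleneck output to
# `last_channels`, followed by BatchNorm + ReLU6.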
self.tail = gluon.nn.HybridSequential()
self.tail.add(gluon.nn.Conv2D(channels=last_channels, in_channels=out_channels[-1],
kernel_size=1, use_bias=False, prefix='tail-conv_'))
self.tail.add(gluon.nn.BatchNorm(prefix='tail-bn_',
**({} if norm_kwargs is None else norm_kwargs)))
self.tail.add(RELU6())
# ------------------------------------------------------------------
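# Classification head: global average pooling, optional dropout, a 1x1
# convolution acting as the fully connected classifier, and a flatten to
# (batch, classes).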
self.avgpool = gluon.nn.GlobalAvgPool2D()
self.drop = gluon.nn.Dropout(final_drop) if final_drop > 0. else (lambda x: x)
self.classifer = gluon.nn.Conv2D(in_channels=last_channels, channels=classes,
kernel_size=1, prefix='classifier_')
self.flat = gluon.nn.Flatten()
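# Minimal usage sketch (assumes the rest of this file provides the class's
# hybrid_forward as well as the RELU6 and _BottleneckV1 helpers used above):
#
#   import mxnet as mx
#   net = _MobileNetV2(multiplier=1.0, classes=1000, ratio=0.)
#   net.initialize(mx.init.Xavier())
#   x = mx.nd.random.uniform(shape=(1, 3, 224, 224))
#   y = net(x)  # expected shape: (1, 1000)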