in utils/gluon/utils/octconv.py [0:0]
def __init__(self, channels, kernel_size, strides=(1, 1), use_bias=True,
             in_channels=0, enable_path=((0, 0), (0, 0)), padding=0,
             groups=1, sample_type='nearest', prefix=None, **kwargs):
    """Octave convolution layer: splits features into a high- and a
    low-frequency branch and wires up to four stride-1 convolutions between
    them (h2h, h2l, l2h, l2l); resolution changes are done by pooling /
    upsampling around the convolutions.

    Parameters
    ----------
    channels : int or (int, int)
        Output channels as ``(high, low)``; a plain int means all-high
        (low branch has 0 channels).
    kernel_size : int or tuple
        Forwarded to every internal ``nn.Conv2D``.
    strides : int or (int, int)
        Overall stride, realized by resizing; only strides <= 2 per axis
        are supported by this version.
    use_bias : bool
        If True, a bias is attached to at most one of the convolutions
        feeding each output branch (so it is never added twice).
    in_channels : int or (int, int)
        Input channels as ``(high, low)``; a plain int means the input has
        no low branch (encoded internally as -1). Must be non-zero: the
        computation graph cannot be inferred automatically yet.
    enable_path : ((int, int), (int, int))
        ``((h2l, h2h), (l2l, l2h))``; an entry of -1 forcibly disables
        that path even when channel counts would allow it.
    padding, groups : int
        Forwarded to the internal convolutions.
    sample_type : str
        Upsampling mode used on the l2h path when stride is 1.
    prefix : str or None
        Gluon block prefix, forwarded to the parent constructor.
    """
    super(Conv2D, self).__init__(prefix=prefix, **kwargs)
    # be compatible to conventional convolution
    (h2l, h2h), (l2l, l2h) = enable_path
    # scalar `channels` -> all channels on the high-frequency branch
    c_h, c_l = channels if isinstance(channels, tuple) else (channels, 0)
    # scalar `in_channels` -> no low-frequency input (-1 sentinel)
    in_c_h, in_c_l = in_channels if isinstance(in_channels, tuple) else (in_channels, -1)
    # either depthwise (groups == total input channels) or each present
    # branch must have at least `groups` channels so grouping is valid
    assert (in_c_h + in_c_l) == groups or ((in_c_h < 0 or in_c_h / groups >= 1)
                                           and (in_c_l < 0 or in_c_l / groups >= 1)), \
        "Constraints are not satisfied: (%d+%d)==%d, %d/%d>1, %d/%d>1" % (
            in_c_h, in_c_l, groups, in_c_h, groups, in_c_l, groups)
    assert in_c_l != 0 and in_c_h != 0, \
        "TODO: current version has to specify the `in_channels' to determine the computation graph"
    # normalize strides to a tuple first so a scalar stride > 2 fails the
    # assertion cleanly instead of raising TypeError when iterated
    s = (strides, strides) if isinstance(strides, int) else strides
    assert all(si <= 2 for si in s), \
        "TODO: current version only support strides({}) <= 2".format(strides)
    is_dw = False
    # a path is active only if not explicitly disabled (-1) and both its
    # input and output branches actually carry channels
    self.enable_l2l = l2l != -1 and in_c_l >= 0 and c_l > 0
    self.enable_l2h = l2h != -1 and in_c_l >= 0 and c_h > 0
    self.enable_h2l = h2l != -1 and in_c_h >= 0 and c_l > 0
    self.enable_h2h = h2h != -1 and in_c_h >= 0 and c_h > 0
    if groups == (in_c_h + in_c_l):  # depthwise convolution
        assert c_l == in_c_l and c_h == in_c_h
        # depthwise convolution cannot mix branches: drop the cross paths
        self.enable_l2h, self.enable_h2l = False, False
        is_dw = True
    # each output branch gets the bias from exactly one incoming conv
    use_bias_l2l, use_bias_h2l = (False, use_bias) if self.enable_h2l else (use_bias, False)
    use_bias_l2h, use_bias_h2h = (False, use_bias) if self.enable_h2h else (use_bias, False)
    # deal with stride with resizing (here, implemented by pooling)
    do_stride2 = s[0] > 1 or s[1] > 1
    with self.name_scope():
        self.conv_l2l = None if not self.enable_l2l else nn.Conv2D(
            channels=c_l, kernel_size=kernel_size, strides=1,
            padding=padding, groups=groups if not is_dw else in_c_l,
            use_bias=use_bias_l2l, in_channels=in_c_l,
            prefix='-l2l_', **kwargs)
        self.conv_l2h = None if not self.enable_l2h else nn.Conv2D(
            channels=c_h, kernel_size=kernel_size, strides=1,
            padding=padding, groups=groups,
            use_bias=use_bias_l2h, in_channels=in_c_l,
            prefix='-l2h_', **kwargs)
        self.conv_h2l = None if not self.enable_h2l else nn.Conv2D(
            channels=c_l, kernel_size=kernel_size, strides=1,
            padding=padding, groups=groups,
            use_bias=use_bias_h2l, in_channels=in_c_h,
            prefix='-h2l_', **kwargs)
        self.conv_h2h = None if not self.enable_h2h else nn.Conv2D(
            channels=c_h, kernel_size=kernel_size, strides=1,
            padding=padding, groups=groups if not is_dw else in_c_h,
            use_bias=use_bias_h2h, in_channels=in_c_h,
            prefix='-h2h_', **kwargs)
        # resizing stages: identity unless a resolution change is needed
        self.l2l_down = (lambda x: x) if not self.enable_l2l or not do_stride2 else \
            nn.AvgPool2D(pool_size=strides, strides=strides,
                         ceil_mode=True, count_include_pad=False)
        # l2h upsamples (x2) only when the overall stride is 1; with
        # stride 2 the high branch is pooled down to the low resolution
        self.l2h_up = (lambda x: x) if not self.enable_l2h or do_stride2 else \
            _upsampling(scales=(2, 2), sample_type=sample_type)
        self.h2h_down = (lambda x: x) if not self.enable_h2h or not do_stride2 else \
            nn.AvgPool2D(pool_size=strides, strides=strides,
                         ceil_mode=True, count_include_pad=False)
        # h2l always downsamples by the octave factor 2, folded together
        # with the requested stride into a single pooling stage
        self.h2l_down = (lambda x: x) if not self.enable_h2l else \
            nn.AvgPool2D(pool_size=(2 * s[0], 2 * s[1]),
                         strides=(2 * s[0], 2 * s[1]),
                         ceil_mode=True, count_include_pad=False)