in python/singa/layer.py [0:0]
def __init__(self,
             nb_kernels,
             kernel_size,
             *args,
             stride=1,
             padding=0,
             dilation=1,
             group=1,
             bias=True,
             pad_mode="NOTSET",
             activation="NOTSET",
             **kwargs):
    """Set up a 2D convolution layer.

    Args:
        nb_kernels (int): number of output channels, i.e. the number of
            filters.
        kernel_size (int or tuple): kernel size for the two spatial
            axes, e.g. (2, 3). A single int k is expanded to (k, k).
            Lists are accepted as well as tuples.
        stride (int or tuple): stride; normalized the same way as
            kernel_size.
        padding (int, tuple, list or None): padding; an int p becomes
            (p, p), a 2-element sequence is symmetric (h, w) padding,
            and a 4-element sequence gives asymmetric per-side padding.
            May be None when pad_mode is "SAME_UPPER" or "SAME_LOWER",
            in which case the padding is computed automatically.
        dilation (int or tuple): only 1 (or (1, 1)) is supported.
        group (int): number of groups for grouped convolution.
        bias (bool): whether to add a bias term.
        pad_mode (string): one of NOTSET, SAME_UPPER, SAME_LOWER.
            NOTSET (default) means explicit padding is used.
            SAME_UPPER / SAME_LOWER pad the input so that the output
            spatial size matches the input; when the total padding is
            odd, the extra unit goes at the end for SAME_UPPER and at
            the beginning for SAME_LOWER.
        activation (string): NOTSET (default, no activation) or RELU,
            which applies a ReLU after this conv2d layer.

    Raises:
        TypeError: if kernel_size, stride or padding has an invalid
            type/length, or an unknown keyword argument is passed.
        ValueError: if a dilation other than 1 is requested.
    """
    super(Conv2d, self).__init__()

    # Backward compatibility with the legacy signature
    # Conv2d(in_channels, nb_kernels, kernel_size[, stride[, padding]]):
    # when extra positionals are present, the first two positionals were
    # really (in_channels, nb_kernels), so shift everything into place.
    if len(args) > 0:
        nb_kernels = kernel_size
        kernel_size = args[0]
    if len(args) > 1:
        stride = args[1]
    if len(args) > 2:
        padding = args[2]

    self.nb_kernels = nb_kernels
    self.kernel_size = kernel_size
    self.stride = stride
    self.padding = padding
    self.dilation = dilation
    self.group = group
    self.bias = bias
    self.pad_mode = pad_mode
    self.activation = activation

    # Normalize kernel_size and stride to (h, w) pairs. Lists are
    # accepted too, for consistency with the padding handling below.
    if isinstance(kernel_size, int):
        self.kernel_size = (kernel_size, kernel_size)
    elif isinstance(kernel_size, (tuple, list)):
        self.kernel_size = tuple(kernel_size)
    else:
        raise TypeError("Wrong kernel_size type.")
    if isinstance(stride, int):
        self.stride = (stride, stride)
    elif isinstance(stride, (tuple, list)):
        self.stride = tuple(stride)
    else:
        raise TypeError("Wrong stride type.")

    # odd_padding holds the per-side amount that a symmetric (h, w)
    # padding cannot express: the input is padded with odd_padding
    # first, then the normal symmetric padding method is applied.
    self.odd_padding = (0, 0, 0, 0)
    if isinstance(padding, int):
        self.padding = (padding, padding)
    elif isinstance(padding, (tuple, list)):
        if len(padding) == 2:
            self.padding = padding
        elif len(padding) == 4:
            _h_mask = padding[0] - padding[1]
            _w_mask = padding[2] - padding[3]
            self.odd_padding = (max(_h_mask, 0), max(-_h_mask, 0),
                                max(_w_mask, 0), max(-_w_mask, 0))
            self.padding = (
                padding[0] - self.odd_padding[0],
                padding[2] - self.odd_padding[2],
            )
        else:
            raise TypeError("Wrong padding value.")
    elif padding is not None:
        # None is legal only together with the SAME_* pad modes (the
        # padding is computed automatically later); any other type is
        # rejected instead of silently passing through unnormalized.
        raise TypeError("Wrong padding value.")

    # Only dilation 1 is implemented. Checking the type first fixes the
    # original bug where list(dilation) raised TypeError for an
    # unsupported int (e.g. dilation=2) instead of ValueError.
    if isinstance(dilation, int):
        _dilation_is_one = dilation == 1
    elif isinstance(dilation, (tuple, list)):
        _dilation_is_one = list(dilation) == [1, 1]
    else:
        _dilation_is_one = False
    if not _dilation_is_one:
        raise ValueError("Not implemented yet")

    self.inner_params = {
        "cudnn_prefer": "fastest",
        "workspace_MB_limit": 1024,
    }
    # TODO valid value of inner_params check
    for kwarg in kwargs:
        if kwarg not in self.inner_params:
            raise TypeError("Keyword argument not understood:", kwarg)
        self.inner_params[kwarg] = kwargs[kwarg]