def __init__()

in cvnets/layers/conv_layer.py


    def __init__(self, opts, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]],
                 stride: Optional[Union[int, Tuple[int, int]]] = 1,
                 dilation: Optional[int] = 1, groups: Optional[int] = 1,
                 bias: Optional[bool] = False, padding_mode: Optional[str] = 'zeros',
                 use_norm: Optional[bool] = True, use_act: Optional[bool] = True,
                 padding: Optional[Union[int, Tuple[int, int]]] = (0, 0),
                 auto_padding: Optional[bool] = True):
        """
        Applies a 2D Transpose Convolution over an input signal composed of several input planes.
        :param opts: command-line arguments
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param kernel_size: kernel size
        :param stride: move the kernel by this amount during the transposed convolution operation
        :param dilation: Add zeros between kernel elements to increase the effective receptive field of the kernel.
        :param groups: Number of groups. If groups=in_channels=out_channels, then it is a depth-wise convolution
        :param bias: Add bias or not
        :param padding_mode: Padding mode. Default is zeros
        :param use_norm: Use normalization layer after convolution layer or not. Default is True.
        :param use_act: Use activation layer after the convolution layer (or after the convolution and
                        normalization layers, when use_norm is True) or not. Default is True.
        :param padding: Padding. Overridden when auto_padding is True.
        :param auto_padding: Compute padding automatically from the kernel size and dilation. Default is True.
        """
        super(TransposeConvLayer, self).__init__()

        if use_norm:
            assert not bias, 'Do not use bias when using normalization layers.'

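        # Normalize kernel_size and stride to 2-tuples; a tuple/list dilation is reduced to its first element.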
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)

        if isinstance(stride, int):
            stride = (stride, stride)

        if isinstance(dilation, (tuple, list)):
            dilation = dilation[0]

        assert isinstance(kernel_size, (tuple, list))
        assert isinstance(stride, (tuple, list))
        assert isinstance(dilation, int)

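        # With auto_padding enabled, derive the padding from the dilated kernel size, overriding any value passed in.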
        if auto_padding:
            padding = (int((kernel_size[0] - 1) / 2) * dilation, int((kernel_size[1] - 1) / 2) * dilation)

        if in_channels % groups != 0:
            logger.error('Input channels are not divisible by groups. {}%{} != 0 '.format(in_channels, groups))
        if out_channels % groups != 0:
            logger.error('Output channels are not divisible by groups. {}%{} != 0 '.format(out_channels, groups))

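        # Assemble the layer as transposed conv -> optional normalization -> optional activation.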
        block = nn.Sequential()
        conv_layer = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                        stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias,
                                        padding_mode=padding_mode)

        block.add_module(name="conv", module=conv_layer)

        self.norm_name = None
        if use_norm:
            norm_layer = get_normalization_layer(opts=opts, num_features=out_channels)
            block.add_module(name="norm", module=norm_layer)
            self.norm_name = norm_layer.__class__.__name__

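        # The activation is configured from the command-line options (name, negative slope, inplace).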
        self.act_name = None
        act_type = getattr(opts, "model.activation.name", "relu")

        if act_type is not None and use_act:
            neg_slope = getattr(opts, "model.activation.neg_slope", 0.1)
            inplace = getattr(opts, "model.activation.inplace", False)
            act_layer = get_activation_fn(act_type=act_type,
                                          inplace=inplace,
                                          negative_slope=neg_slope,
                                          num_parameters=out_channels)
            block.add_module(name="act", module=act_layer)
            self.act_name = act_layer.__class__.__name__

        self.block = block

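        # Keep the layer configuration around as attributes.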
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.groups = groups
        self.kernel_size = conv_layer.kernel_size
        self.bias = bias
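
For reference, a minimal usage sketch. The opts namespace below is hypothetical: cvnets normally builds it from its own argument parser, and the dotted option names (model.activation.*, model.normalization.*) as well as their default values are assumptions here, not taken from this file.

    import argparse
    import torch

    from cvnets.layers.conv_layer import TransposeConvLayer

    # Hypothetical stand-in for the cvnets argument namespace.
    opts = argparse.Namespace(**{
        "model.activation.name": "relu",
        "model.activation.neg_slope": 0.1,
        "model.activation.inplace": False,
        "model.normalization.name": "batch_norm_2d",
        "model.normalization.momentum": 0.1,
    })

    # 3x3 transposed convolution with stride 2, followed by normalization and activation.
    layer = TransposeConvLayer(opts, in_channels=32, out_channels=64,
                               kernel_size=3, stride=2, use_norm=True, use_act=True)

    x = torch.randn(1, 32, 16, 16)
    y = layer(x)  # forward is expected to run self.block(x)
    print(y.shape)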