# conv_helper()
#
# From: coremltools/converters/mil/backend/nn/op_mapping.py

def conv_helper(const_context, builder, op):
    """Translate a MIL convolution op (``conv`` / ``conv_quantized``) into
    NN-spec layers via ``builder``.

    1D convolution is emulated with the 2D layer: the input (and a dynamic
    weight, when present) is expanded along axis -2, a 2D convolution layer
    is emitted, and the extra axis is squeezed off the output afterwards.

    Parameters
    ----------
    const_context:
        Backend constant-tracking context, forwarded to ``make_input``.
    builder:
        The NN spec builder the layers are added to.
    op:
        The MIL convolution op being translated.

    Raises
    ------
    ValueError
        If the input rank is not 3, 4, or 5, or if a 3D convolution has
        dynamic (non-const) weights.
    """
    # v2 x: (n, C_in/groups, spatial_dims)
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name

    is_conv1d = op.x.rank == 3
    is_conv2d = op.x.rank == 4
    is_conv3d = op.x.rank == 5
    if not (is_conv1d or is_conv2d or is_conv3d):
        raise ValueError(
            "Input tensor rank '{}' is not one of '{}'.".format(op.x.rank, (3, 4, 5),)
        )
    if is_conv1d:
        # Emulate conv1d with conv2d: insert a singleton spatial axis at -2.
        # The 2D conv output is squeezed back down after the builder call.
        x_name = op.name + "_expand_dim"
        out_name += "_expanded"
        builder.add_expand_dims(
            name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2],
        )
    # `x_name` is guaranteed to be (n, C_in/groups, spatial_dims) for 1D and 2D convolution
    # W_v1 wil be np.ndarray (if W is const at compile time) or None
    # (if W is not known at compile time).
    weights = None
    input_names = [x_name]
    if op.weight.val is not None:
        # v2 convolution (conv3d) expects weights to have shape (C_out, C_in/groups, spatial_dims)
        # v1 convolution expects (H, W, C_in/groups, C_out) or (D, H, W, C_in/groups, C_out)
        weights = op.weight.val
        if is_conv1d:
            weights = _np.expand_dims(op.weight.val, -2)
        if is_conv1d or is_conv2d:
            weights = _np.transpose(weights, [2, 3, 1, 0])
    else:
        # op.weight is not const at compile time.
        # When weight is dynamic, v1 convolution expects weight to be
        # (C_out, C_in/groups, H, W)
        # TODO 3D convolution doesn't support dynamic weights:
        if is_conv3d:
            raise ValueError("3D Convolution doesn't support dynamic weights.")
        weights_name = op.weight.name
        if is_conv1d:
            # Dynamic 1D weight also needs the singleton axis inserted.
            weights_name += "_expand_dim"
            builder.add_expand_dims(
                name=weights_name,
                input_name=op.weight.name,
                output_name=weights_name,
                axes=[-2],
            )
        input_names.append(weights_name)

    # padding: MIL "custom" padding maps to v1 "valid" plus explicit
    # per-edge pad amounts for 1D/2D; conv3d takes the pad dict directly.
    padding_mode = op.pad_type.val
    pad = {}
    if padding_mode == "custom":
        if is_conv1d:
            padding_mode = "valid"
            # The inserted singleton axis gets zero padding.
            pad["padding_top"] = 0
            pad["padding_bottom"] = 0
            pad["padding_left"] = op.pad.val[0]
            pad["padding_right"] = op.pad.val[1]
        elif is_conv2d:
            padding_mode = "valid"
            pad["padding_top"] = op.pad.val[0]
            pad["padding_bottom"] = op.pad.val[1]
            pad["padding_left"] = op.pad.val[2]
            pad["padding_right"] = op.pad.val[3]
        else:
            pad["padding_front"] = op.pad.val[0]
            pad["padding_back"] = op.pad.val[1]
            pad["padding_top"] = op.pad.val[2]
            pad["padding_bottom"] = op.pad.val[3]
            pad["padding_left"] = op.pad.val[4]
            pad["padding_right"] = op.pad.val[5]

    has_bias = op.bias is not None
    groups = op.groups.val

    strides = op.strides.val.tolist()
    dilations = op.dilations.val.tolist()
    if is_conv1d:
        # Insert stride/dilation of 1 for the emulated singleton axis.
        dilations = dilations[:-1] + [1] + dilations[-1:]
        strides = strides[:-1] + [1] + strides[-1:]

    if weights is not None and op.op_type == "conv_quantized":
        # Quantized weights are serialized as packed n-bit bytes; only the
        # 1D/2D builder call consumes the quantization parameters.
        nbits = op.nbits.val
        weights = _convert_array_to_nbit_quantized_bytes(weights.flatten(), nbits).tobytes()
        quantization_type = op.quantization_type.val
        quant_bias = op.quant_bias.val
        quant_scale = op.quant_scale.val
    else:
        quantization_type = None
        nbits = None
        quant_bias = None
        quant_scale = None

    if is_conv1d or is_conv2d:
        builder.add_convolution(
            name=out_name,
            kernel_channels=op.weight.shape[1],
            output_channels=op.weight.shape[0],
            height=1 if is_conv1d else op.weight.shape[2],
            width=op.weight.shape[2] if is_conv1d else op.weight.shape[3],
            stride_height=strides[0],
            stride_width=strides[1],
            border_mode=padding_mode,
            groups=groups,
            W=weights,
            b=op.bias.val if has_bias else None,
            has_bias=has_bias,
            is_deconv=False,
            input_name=input_names,
            output_name=out_name,
            dilation_factors=dilations,
            quantization_type=quantization_type,
            nbits=nbits,
            quant_bias=quant_bias,
            quant_scale=quant_scale,
            **pad  # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad`
        )

        # Squeeze the axis added for the 1d case so the final output carries
        # the op's real output name.
        if is_conv1d:
            builder.add_squeeze(
                name=op.name,
                input_name=out_name,
                output_name=op.outputs[0].name,
                axes=[-2],
            )

    if is_conv3d:
        builder.add_convolution3d(
            name=op.name,
            input_channels=op.weight.shape[1] * groups,
            output_channels=op.weight.shape[0],
            depth=op.weight.shape[2],
            height=op.weight.shape[3],
            width=op.weight.shape[4],
            W=op.weight.val,
            b=op.bias.val if has_bias else None,
            has_bias=has_bias,
            groups=groups,
            stride_depth=strides[0],
            stride_height=strides[1],
            stride_width=strides[2],
            dilation_depth=dilations[0],
            dilation_height=dilations[1],
            dilation_width=dilations[2],
            padding_mode=padding_mode,
            is_deconv=False,
            output_shape=None,
            input_name=input_names,
            output_name=out_name,
            **pad  # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad`
        )