coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py [148:185]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    conv_weight_type = conv_weight.dtype

    # create a zero bias for the conv if one does not exist
    if conv_bias is None:
        conv_bias = np.zeros(Cout)
    else:
        conv_bias = conv_bias.val
    conv_bias = conv_bias.astype(conv_weight_type)

    # get the original shapes of the weight and bias
    origin_weight_shape = conv_weight.shape
    origin_bias_shape = conv_bias.shape

    # update the weight/bias of the conv layer
    if is_scalar:
        new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
        new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)

    else:
        scale = np.reshape(scale, (Cout,))
        new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
        new_conv_weight = []
        if is_deconv:
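            # move the output channels onto axis 0 so the loop below can scale them per channel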
            conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
            conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))

        for i in range(Cout):
            _conv_weight = conv_weight[i] * scale[i]
            new_conv_weight.append(_conv_weight)
        new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)

        if is_deconv:
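            # undo the rearrangement to restore the original deconv weight layout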
            new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
            new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])

    # make sure the updated weight and bias have the same shape as the original ones
    assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass."
    assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass."
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
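Both copies implement the same fold: a scalar or per-output-channel multiply that follows a conv can be absorbed by scaling each output-channel slice of the weight, and the bias, by the same factor. A minimal sketch of that identity, modeling a 1x1 conv as a matmul; the names (x, weight, bias, scale) are illustrative, not taken from the pass:

import numpy as np

rng = np.random.default_rng(0)
Cin, Cout = 4, 3
x = rng.standard_normal(Cin)
weight = rng.standard_normal((Cout, Cin))  # 1x1 conv kernel, [Cout, Cin]
bias = rng.standard_normal(Cout)
scale = rng.standard_normal(Cout)          # per-output-channel scale

# conv followed by a per-channel multiply ...
unfused = (weight @ x + bias) * scale
# ... equals a conv whose weight rows and bias were pre-scaled
fused = (weight * scale[:, None]) @ x + bias * scale

assert np.allclose(unfused, fused)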



coremltools/converters/mil/mil/passes/conv_scale_fusion.py [75:112]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    conv_weight_type = conv_weight.dtype

    # create a zero bias for the conv if one does not exist
    if conv_bias is None:
        conv_bias = np.zeros(Cout)
    else:
        conv_bias = conv_bias.val
    conv_bias = conv_bias.astype(conv_weight_type)

    # get the original shapes of the weight and bias
    origin_weight_shape = conv_weight.shape
    origin_bias_shape = conv_bias.shape

    # update the weight/bias of the conv layer
    if is_scalar:
        new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
        new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)

    else:
        scale = np.reshape(scale, (Cout,))
        new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
        new_conv_weight = []
        if is_deconv:
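            # move the output channels onto axis 0 so the loop below can scale them per channel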
            conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
            conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))

        for i in range(Cout):
            _conv_weight = conv_weight[i] * scale[i]
            new_conv_weight.append(_conv_weight)
        new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)

        if is_deconv:
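            # undo the rearrangement to restore the original deconv weight layout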
            new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
            new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])

    # make sure the updated weight and bias have the same shape as the original ones
    assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass."
    assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass."
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
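The is_deconv branch in both copies exists because, assuming the MIL layout of [Cin, Cout // groups, *spatial] for deconv weights, the output channels do not sit on axis 0; the transpose/reshape pair moves them there for the per-channel loop, and the mirrored pair afterwards restores the original layout. A standalone sketch of that round trip under the same layout assumption, with illustrative sizes:

import numpy as np

Cin, Cout, groups, K = 4, 6, 2, 3
w = np.arange(Cin * (Cout // groups) * K, dtype=np.float32)
w = w.reshape(Cin, Cout // groups, K)  # assumed deconv weight layout

# forward rearrangement, as in the pass: output channels onto axis 0
w2 = np.transpose(w, [1, 0, 2])
w2 = np.reshape(w2, [Cout, Cin // groups] + list(w2.shape[2:]))

# (the per-channel scaling of w2[i] for i in range(Cout) happens here)

# inverse rearrangement restores the original layout exactly
w3 = np.reshape(w2, [Cout // groups, Cin] + list(w2.shape[2:]))
w3 = np.transpose(w3, [1, 0, 2])
assert w3.shape == w.shape and np.array_equal(w3, w)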