Result NeuralNetworkSpecValidator::validateConvolution3DLayer()

in mlmodel/src/Validation/NeuralNetwork/NeuralNetworkLayerValidator.cpp [163:316]


Result NeuralNetworkSpecValidator::validateConvolution3DLayer(const Specification::NeuralNetworkLayer& layer) {
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validateInputCount(layer, 1, 1));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validateOutputCount(layer, 1, 1));

    if (ndArrayInterpretation) {
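        // Input and output must have equal rank, and the input rank must be at
        // least 5, i.e. [batch, channels, depth, height, width].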
        HANDLE_RESULT_AND_RETURN_ON_ERROR(validateInputOutputRankEquality(layer, "Convolution3D", blobNameToRank));
        HANDLE_RESULT_AND_RETURN_ON_ERROR(validateRankCount(layer, "Convolution3D", 5, -1, blobNameToRank));
    }

    const auto& params = layer.convolution3d();

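    // Unlike Convolution, Convolution3D does not accept weights as a second
    // input tensor, so exactly one input is required.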
    if (layer.input_size() != 1) {
        std::string err = "Convolution3D layer '" + layer.name() +
            "' does not support weights as an input tensor.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }

    // Validate input and output channels and number of convolution groups are positive
    int inputChannels = params.inputchannels();
    int outputChannels = params.outputchannels();
    int nGroups = params.ngroups();
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(inputChannels, "Input Channels"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(outputChannels, "Output Channels"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(nGroups, "Groups"));

    // Validate kernel is positive
    int kernelDepth = params.kerneldepth();
    int kernelHeight = params.kernelheight();
    int kernelWidth = params.kernelwidth();
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(kernelDepth, "Kernel Depth"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(kernelHeight, "Kernel Height"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(kernelWidth, "Kernel Width"));

    // Validate stride is positive
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(params.stridedepth(), "Stride Depth"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(params.strideheight(), "Stride Height"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(params.stridewidth(), "Stride Width"));

    // Validate dilation is positive
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(params.dilationdepth(), "Dilation Depth"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(params.dilationheight(), "Dilation Height"));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validatePositive(params.dilationwidth(), "Dilation Width"));

    // Validate padding
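    // Custom padding values apply when the padding type is CUSTOM; they must be non-negative.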
    int customPaddingFront = params.custompaddingfront();
    int customPaddingBack = params.custompaddingback();
    int customPaddingTop = params.custompaddingtop();
    int customPaddingBottom = params.custompaddingbottom();
    int customPaddingLeft = params.custompaddingleft();
    int customPaddingRight = params.custompaddingright();
    const std::pair<int, const char*> customPaddings[] = {
        {customPaddingFront, "Front"}, {customPaddingBack, "Back"},
        {customPaddingTop, "Top"}, {customPaddingBottom, "Bottom"},
        {customPaddingLeft, "Left"}, {customPaddingRight, "Right"}
    };
    for (const auto& padding : customPaddings) {
        if (padding.first < 0) {
            std::string err = "Custom Padding " + std::string(padding.second) +
                " must be non-negative, got '" + std::to_string(padding.first) + "'.";
            return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
        }
    }

    WeightParamType weightsValueType = valueType(params.weights());
    WeightParamType biasValueType = valueType(params.bias());
    bool has_bias = params.hasbias();

    // Check weight/bias value types. Only float32 or float16 parameters can be populated at a time.
    if ((weightsValueType == UNSPECIFIED) || (has_bias && biasValueType == UNSPECIFIED)) {
        std::string err = "Convolution3D layer '" + layer.name() +
            "' has invalid weights/bias fields.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }
    if (has_bias) {
        if ((weightsValueType == CoreML::FLOAT16 && biasValueType == CoreML::FLOAT32) ||
            (weightsValueType == CoreML::FLOAT32 && biasValueType == CoreML::FLOAT16)) {
            return Result(ResultType::INVALID_MODEL_PARAMETERS, "Convolution3D layer '" + layer.name() +
                          "' has mismatched weights/bias precisions. They should both be either half or full precision.");
        }
    }

    bool is_deconv = params.isdeconvolution();
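    // A deconvolution (transposed convolution) changes the expected weight layout
    // and is the only variant that may specify an explicit output shape.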

    if (params.outputshape_size() != 0) {
        if (!is_deconv) {
            std::string err = "Convolution3D layer '" + layer.name() + "': output shape is only supported for deconvolution.";
            return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
        } else if (params.outputshape_size() != 3) {
            std::string err = "Deconvolution3D layer '" + layer.name() + "': if set, output shape must be of length 3.";
            return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
        }
    }
    // Manually check whether weights are quantized: we don't currently support them,
    // and `validateGeneralWeightParams` allows them.
    if (weightsValueType == QUINT) {
        std::string err = "Layer '" + layer.name() + "' has invalid weights field. Quantized " +
            "weights are not supported.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }
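    // Expected weight count follows the weight layout from the Core ML spec:
    //   convolution:    [outputChannels, inputChannels / nGroups, kD, kH, kW]
    //   deconvolution:  [inputChannels, outputChannels / nGroups, kD, kH, kW]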
    uint64_t expected_weight_size = 0;
    if (is_deconv) {
        expected_weight_size = static_cast<uint64_t>((outputChannels / nGroups) * inputChannels * kernelDepth * kernelHeight * kernelWidth);
    } else {
        expected_weight_size = static_cast<uint64_t>(outputChannels * (inputChannels / nGroups) * kernelDepth * kernelHeight * kernelWidth);
    }
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validateGeneralWeightParams(params.weights(), expected_weight_size, uint64_t(outputChannels),
                                                                  "Convolution3D ", layer.name(), "weights"));

    // Check the bias
    int bias_size = 0;
    if (has_bias) {
        if (biasValueType == FLOAT32 || biasValueType == FLOAT16){
            if (biasValueType == FLOAT32){
                bias_size = params.bias().floatvalue().size();
            } else {
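                // float16 values are serialized as raw bytes, two bytes per element.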
                bias_size = int(params.bias().float16value().size() / 2);
            }
            if (bias_size != outputChannels) {
                std::string err = "Convolution3D layer '" + layer.name() +
                    "' has a bias vector of size " + std::to_string(bias_size) + " but should be " +
                    std::to_string(outputChannels) + ".";
                return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
            }
        } else if (biasValueType == QUINT){
            // We don't currently support quantized parameters.
            std::string err = "Layer '" + layer.name() + "' has invalid bias field. Quantized " +
                "bias is not supported.";
            return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
        } else { // EMPTY
            return Result(ResultType::INVALID_MODEL_PARAMETERS, "Layer '" + layer.name() +
                          "' has not specified a bias.");
        }
    }
    return Result();
}
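
Below is a minimal, illustrative sketch of driving this validator. It assumes the generated C++ protobuf API for Specification::NeuralNetworkLayer (setters such as set_inputchannels() mirroring the getters used above, and add_floatvalue() on the repeated floatValue field of WeightParams) and an in-scope NeuralNetworkSpecValidator; the layer name, dimensions, and helper function are hypothetical.

// Illustrative only: builds a small, valid Convolution3D layer and validates it.
Result validateExampleConv3d(NeuralNetworkSpecValidator& validator) {
    Specification::NeuralNetworkLayer layer;
    layer.set_name("conv3d_example");   // hypothetical layer name
    layer.add_input("input");
    layer.add_output("output");

    auto* params = layer.mutable_convolution3d();
    params->set_inputchannels(4);
    params->set_outputchannels(8);
    params->set_ngroups(1);
    params->set_kerneldepth(3);
    params->set_kernelheight(3);
    params->set_kernelwidth(3);
    params->set_stridedepth(1);
    params->set_strideheight(1);
    params->set_stridewidth(1);
    params->set_dilationdepth(1);
    params->set_dilationheight(1);
    params->set_dilationwidth(1);
    params->set_hasbias(false);

    // Convolution weight count: outputChannels * (inputChannels / nGroups)
    //   * kernelDepth * kernelHeight * kernelWidth = 8 * 4 * 3 * 3 * 3 = 864.
    for (int i = 0; i < 8 * 4 * 3 * 3 * 3; ++i) {
        params->mutable_weights()->add_floatvalue(0.0f);
    }

    return validator.validateConvolution3DLayer(layer);
}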