in mlmodel/src/Validation/NeuralNetwork/NeuralNetworkLayerValidator.cpp [18:161]
Result NeuralNetworkSpecValidator::validateConvolutionLayer(const Specification::NeuralNetworkLayer& layer) {

    HANDLE_RESULT_AND_RETURN_ON_ERROR(validateInputCount(layer, 1, 2));
    HANDLE_RESULT_AND_RETURN_ON_ERROR(validateOutputCount(layer, 1, 1));
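
    // Two shape-mapping modes: with the ND-array interpretation the input/output
    // ranks must match and pass the rank-count check below; with the older
    // rank-5 mapping, convolution cannot take its weights as a second input.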
    if (ndArrayInterpretation) {
        HANDLE_RESULT_AND_RETURN_ON_ERROR(validateInputOutputRankEquality(layer, "Convolution", blobNameToRank));
        HANDLE_RESULT_AND_RETURN_ON_ERROR(validateRankCount(layer, "Convolution", 4, -1, blobNameToRank));
    } else {
        if (layer.input_size() != 1) {
            std::string err = "Convolution Layer '" + layer.name() +
                              "' does not support weight as input tensor when RANK5_ARRAY_MAPPING == true.";
            return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
        }
    }

    // We need to check if the ConvolutionPaddingType is set
    if (layer.convolution().ConvolutionPaddingType_case() == Specification::ConvolutionLayerParams::ConvolutionPaddingTypeCase::CONVOLUTIONPADDINGTYPE_NOT_SET) {
        std::string err = "Padding type for convolution layer '" + layer.name() + "' is not set.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }

    const auto& params = layer.convolution();
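
    // Deconvolution and dilated convolution require static weights; only a
    // plain convolution may take its weights from a second input tensor.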
    bool is_deconv = params.isdeconvolution();
    if (is_deconv && layer.input_size() != 1) {
        std::string err = "Deconvolution Layer '" + layer.name() + "' does not support weight as input tensor.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }

    if (layer.input_size() != 1 &&
        ((params.dilationfactor_size() > 0 && params.dilationfactor(0) > 1) ||
         (params.dilationfactor_size() > 1 && params.dilationfactor(1) > 1))) {
        std::string err = "Convolution layer '" + layer.name() + "': dilated convolution does not support weight as input tensor.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }

    uint64_t kernelChannels = params.kernelchannels();
    uint64_t outputChannels = params.outputchannels();
    uint64_t nGroups = params.ngroups();
    if (nGroups == 0) {
        // default value specified in protobuf
        nGroups = 1;
    }

    uint64_t kernelHeight;
    if (params.kernelsize_size() > 0) {
        kernelHeight = params.kernelsize(0);
    } else {
        // this is the default specified in the protobuf file
        kernelHeight = 3;
    }

    uint64_t kernelWidth;
    if (params.kernelsize_size() > 1) {
        kernelWidth = params.kernelsize(1);
    } else {
        kernelWidth = 3;
    }
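
    // With dynamic weights the layer has a second input that supplies the
    // weight tensor at runtime, so the stored weight fields are empty and the
    // size checks below do not apply. A static bias cannot be combined with
    // dynamic weights.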
    bool has_bias = params.hasbias();
    if (has_bias && layer.input_size() != 1) {
        std::string err = "Convolution layer: '" + layer.name() + "' with dynamic weight does not support static bias.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }

    if (layer.input_size() > 1) {
        return Result();
    }
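
    // valueType() reports which field of the WeightParams message is populated:
    // float32, float16, quantized (QUINT), empty, or UNSPECIFIED when the
    // fields are set in an invalid combination.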
    WeightParamType weightsValueType = valueType(params.weights());
    WeightParamType biasValueType = valueType(params.bias());

    // Check weight/bias value types. Only float32 or float16 parameters can be populated at any time.
    if (weightsValueType == UNSPECIFIED || (has_bias && biasValueType == UNSPECIFIED)) {
        std::string err = "Convolution layer '" + layer.name() + "' has invalid weights/bias fields.";
        return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
    }

    if (has_bias) {
        if ((weightsValueType == CoreML::FLOAT16 && biasValueType == CoreML::FLOAT32) ||
            (weightsValueType == CoreML::FLOAT32 && biasValueType == CoreML::FLOAT16)) {
            return Result(ResultType::INVALID_MODEL_PARAMETERS, "Convolution layer '" + layer.name() +
                          "' has mismatched precisions of weights/bias. They should both be either half or full precision.");
        }
    }

    // Get populated weight and bias sizes
    // Check weights
    uint64_t expected_weight_size = 0;
    // conv:   outputChannels x kernelChannels x kernelHeight x kernelWidth
    // deconv: kernelChannels x (outputChannels / nGroups) x kernelHeight x kernelWidth
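    // For example (hypothetical values): a convolution with outputChannels = 32,
    // kernelChannels = 16 and a 3 x 3 kernel expects 32 * 16 * 3 * 3 = 4608 weights.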
    if (is_deconv) {
        expected_weight_size = kernelChannels * (outputChannels / nGroups) * kernelHeight * kernelWidth;
    } else {
        expected_weight_size = outputChannels * kernelChannels * kernelHeight * kernelWidth;
    }

    uint64_t weight_size = 0;
    if (weightsValueType == FLOAT32 || weightsValueType == FLOAT16) {
        if (weightsValueType == FLOAT32) {
            weight_size = static_cast<uint64_t>(params.weights().floatvalue().size());
        } else {
            // float16 values are stored as raw bytes, two bytes per element
            weight_size = static_cast<uint64_t>(params.weights().float16value().size() / 2);
        }
        if (weight_size != expected_weight_size) {
            if (is_deconv) {
                std::string err = "Deconvolution layer '" + layer.name() + "' has weight matrix of size " +
                    std::to_string(weight_size) + " to encode a " + std::to_string(kernelChannels) + " × " +
                    std::to_string(outputChannels / nGroups) + " × " + std::to_string(kernelHeight) + " × " +
                    std::to_string(kernelWidth) + " convolution.";
                return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
            } else {
                std::string err = "Convolution layer '" + layer.name() + "' has weight matrix of size " +
                    std::to_string(weight_size) + " to encode a " + std::to_string(outputChannels) + " × " +
                    std::to_string(kernelChannels) + " × " + std::to_string(kernelHeight) + " × " +
                    std::to_string(kernelWidth) + " convolution.";
                return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
            }
        }
    } // if (weightsValueType == FLOAT32 || weightsValueType == FLOAT16)
    else if (weightsValueType == QUINT) {
        // Quantized weights: element count and quantization parameters are
        // validated by the shared helper.
        HANDLE_RESULT_AND_RETURN_ON_ERROR(validateGeneralWeightParams(params.weights(), expected_weight_size, outputChannels, "Convolution", layer.name(), "weight"));
    } else { // EMPTY
        return Result(ResultType::INVALID_MODEL_PARAMETERS, "Layer '" + layer.name() + "' has not specified weights.");
    }

    // Check the bias
    uint64_t bias_size = 0;
    if (has_bias) {
        if (biasValueType == FLOAT32 || biasValueType == FLOAT16) {
            if (biasValueType == FLOAT32) {
                bias_size = static_cast<uint64_t>(params.bias().floatvalue().size());
            } else {
                bias_size = static_cast<uint64_t>(params.bias().float16value().size() / 2);
            }
            if (bias_size != outputChannels) {
                std::string err = "Convolution layer '" + layer.name() + "' has a bias vector of size " +
                    std::to_string(bias_size) + " but should be " + std::to_string(outputChannels) + ".";
                return Result(ResultType::INVALID_MODEL_PARAMETERS, err);
            }
        } else if (biasValueType == QUINT) {
            // quantization of bias vector should be 1
            HANDLE_RESULT_AND_RETURN_ON_ERROR(validateGeneralWeightParams(params.bias(), outputChannels, 1, "Convolution", layer.name(), "bias"));
        } else { // EMPTY
            return Result(ResultType::INVALID_MODEL_PARAMETERS, "Layer '" + layer.name() + "' has not specified bias.");
        }
    }

    return Result();
}
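
// A minimal sketch (not part of the validator) of building a spec that passes
// the checks above. The accessor names follow the generated protobuf getters
// already used in this file; "conv1" and all sizes are hypothetical examples.
//
//     Specification::NeuralNetworkLayer layer;
//     layer.set_name("conv1");
//     layer.add_input("in");
//     layer.add_output("out");
//     auto* conv = layer.mutable_convolution();
//     conv->mutable_valid();            // set a padding type so the oneof is populated
//     conv->set_outputchannels(32);
//     conv->set_kernelchannels(16);
//     conv->add_kernelsize(3);          // kernelHeight
//     conv->add_kernelsize(3);          // kernelWidth
//     // 32 * 16 * 3 * 3 = 4608 float32 weights
//     for (int i = 0; i < 32 * 16 * 3 * 3; ++i) {
//         conv->mutable_weights()->add_floatvalue(0.0f);
//     }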