in mlmodel/src/Validation/NeuralNetwork/NeuralNetworkValidator.cpp [466:682]
// Validates the top-level structure of a neural-network model spec.
// (Templated over the neural-network message type T -- the template header is
// above this excerpt; presumably T is one of the NeuralNetwork /
// NeuralNetworkClassifier / NeuralNetworkRegressor messages -- confirm.)
//
// Parameters:
//   interface       - model description holding input/output feature descriptions.
//   nn              - the neural-network message (layers + shape-mapping enums).
//   outputBlobNames - out-param: filled with the names of blobs produced by the
//                     graph (including blobs that shadow an input name when more
//                     than one layer produces them, e.g. via a copy layer).
//   isUpdatable     - when true, additionally runs the updatable-model checks.
//
// Returns a good Result on success, otherwise the first validation failure hit.
Result validateNeuralNetworkTopLevel(const Specification::ModelDescription& interface,
const T& nn, std::set<std::string>& outputBlobNames,
bool isUpdatable) {
Result r;
// First calculate the value of the flag "ndArrayInterpretation"
// ndArrayInterpretation == False ==> iOS 11/12 (old) execution path can be used, i.e. all tensors are static rank 5.
// ndArrayInterpretation == True ==> Tensors can have any rank (including 5).
bool ndArrayInterpretation = false;
bool hasNonIOS12Layer = false;         // any layer type introduced after iOS 12
bool hasNewArrayShapeMapping = false;  // array input mapping is not the legacy RANK5 mapping
bool hasNewImageShapeMapping = false;  // image input mapping is not the legacy RANK5 mapping
bool hasMultiArrayInput = false;
// Scan inputs once: the array-mapping consistency checks below only matter
// when at least one input is an MLMultiArray.
for (const auto& input: interface.input()) {
if (input.type().Type_case() == Specification::FeatureType::kMultiArrayType) {
hasMultiArrayInput = true;
break;
}
}
if (nn.arrayinputshapemapping() != Specification::NeuralNetworkMultiArrayShapeMapping::RANK5_ARRAY_MAPPING) {
hasNewArrayShapeMapping = true;
}
if (nn.imageinputshapemapping() != Specification::NeuralNetworkImageShapeMapping::RANK5_IMAGE_MAPPING) {
hasNewImageShapeMapping = true;
}
// A single post-iOS12 layer anywhere in the graph is enough to force the
// rank-agnostic interpretation.
for (const auto &layer: nn.layers()) {
if (!isIOS12NeuralNetworkLayer(layer)) {
hasNonIOS12Layer = true;
break;
}
}
// Any one of the three signals switches the whole network to the ND-array path.
if (hasNonIOS12Layer || hasNewArrayShapeMapping || hasNewImageShapeMapping) {
ndArrayInterpretation = true;
}
// Inconsistent spec: newer layers require the exact (non-RANK5) array mapping
// whenever a multi-array input is present.
if (hasNonIOS12Layer && !hasNewArrayShapeMapping && hasMultiArrayInput) {
return Result(ResultType::INVALID_MODEL_INTERFACE,
"Neural Network Multi-Array input shape mapping cannot be 'RANK5_ARRAY_MAPPING' if the network contains a layer added in version 4 (iOS 13) or later. Use 'EXACT_ARRAY_MAPPING' instead.");
}
// Inconsistent spec: legacy array mapping cannot be combined with the newer
// image mapping when a multi-array input is present.
if (!hasNewArrayShapeMapping && hasNewImageShapeMapping && hasMultiArrayInput) {
return Result(ResultType::INVALID_MODEL_INTERFACE,
"Neural Network Multi-Array input shape mapping cannot be 'RANK5_ARRAY_MAPPING' if the image input Shape mapping is not 'RANK5_IMAGE_MAPPING'");
}
//==================== End of logic to determine the value of "ndArrayInterpretation" ======================
// Basic cardinality checks on the model interface and the graph.
if (interface.input_size() == 0) {
return Result(ResultType::INVALID_MODEL_INTERFACE,
"Neural networks require at least one input.");
}
if (interface.output_size() == 0) {
return Result(ResultType::INVALID_MODEL_INTERFACE,
"Neural networks produce at least one output.");
}
if (nn.layers().size() == 0) {
return Result(ResultType::INVALID_MODEL_PARAMETERS,
"Neural networks require at least one layer.");
}
// At least one input must be non-optional; an all-optional interface is invalid.
if (std::all_of(interface.input().begin(), interface.input().end(),
[](const Specification::FeatureDescription& input) {
return input.type().isoptional();
})) {
return Result(ResultType::INVALID_MODEL_INTERFACE,
"Neural networks require at least one non-optional input.");
}
// Check the input types
HANDLE_RESULT_AND_RETURN_ON_ERROR(validateInputOutputTypes(interface.input(), ResultReason::MODEL_INPUT_TYPE_INVALID, "inputs"));
std::map<std::string, int> ioBlobNameToRank; // to collect ranks of input/output blobs from the shapes present in the description
// populate "ioBlobNameToRank"
// Only relevant on the ND-array path; the legacy path fixes everything at rank 5.
if (ndArrayInterpretation) {
for (const auto& input: interface.input()) {
if (input.type().Type_case() == Specification::FeatureType::kMultiArrayType) {
if (nn.arrayinputshapemapping() == Specification::NeuralNetworkMultiArrayShapeMapping::RANK5_ARRAY_MAPPING) {
// Legacy array mapping: array inputs are always rank 5.
ioBlobNameToRank[input.name()] = 5;
} else {
// Exact mapping: rank comes from the declared shape in the description.
ioBlobNameToRank[input.name()] = input.type().multiarraytype().shape_size();
}
} else if (input.type().Type_case() == Specification::FeatureType::kImageType) {
if (nn.imageinputshapemapping() == Specification::NeuralNetworkImageShapeMapping::RANK5_IMAGE_MAPPING) {
ioBlobNameToRank[input.name()] = 5;
} else {
// Non-legacy image mapping maps an image to a rank-4 tensor.
ioBlobNameToRank[input.name()] = 4;
}
}
}
// Output ranks are recorded only when the description actually declares a shape.
for (const auto& output: interface.output()) {
if (output.type().Type_case() == Specification::FeatureType::kMultiArrayType) {
if (output.type().multiarraytype().shape_size()) {
ioBlobNameToRank[output.name()] = output.type().multiarraytype().shape_size();
}
}
}
}
// Collect Model input names and do some checking
// inputBlobs: For each named data blob, the name of the node which produced it (there can be multiple in if-else branches)
std::map<std::string, std::set<std::string>> inputBlobs;
for (const auto& input: interface.input()) {
// For input blobs, we'll give them a dummy producing layer name
inputBlobs[input.name()] = {"__input"};
if (input.type().Type_case() == Specification::FeatureType::kMultiArrayType) {
if (!ndArrayInterpretation) {
// only vector-like (rank 1) or image-like (rank 3) inputs are allowed
bool validShapeFound = false;
// Static (default) shape, if declared, must itself be rank 1 or 3.
if (input.type().multiarraytype().shape().size() > 0) {
if (!(input.type().multiarraytype().shape().size() == 1
|| input.type().multiarraytype().shape().size() == 3)) {
return Result(ResultType::INVALID_MODEL_INTERFACE, "Input MLMultiArray to neural networks must have dimension 1 (vector) or 3 (image-like arrays).");
}
else {
validShapeFound = true;
}
}
// The flexible-shape declaration (if any) must also stay within rank 1 or 3.
bool flexibilityIsRank1or3 = true;
switch (input.type().multiarraytype().ShapeFlexibility_case()) {
case CoreML::Specification::ArrayFeatureType::kEnumeratedShapes:
// Every enumerated shape must be rank 1 or 3.
for (const auto &shape : input.type().multiarraytype().enumeratedshapes().shapes()) {
if(shape.shape_size() != 1 && shape.shape_size() != 3) {
flexibilityIsRank1or3 = false;
break;
}
}
break;
case CoreML::Specification::ArrayFeatureType::kShapeRange:
// A range declaration fixes the rank via its number of size ranges.
flexibilityIsRank1or3 = (input.type().multiarraytype().shaperange().sizeranges_size() == 1 ||
input.type().multiarraytype().shaperange().sizeranges_size() == 3);
break;
case CoreML::Specification::ArrayFeatureType::SHAPEFLEXIBILITY_NOT_SET:
// No flexibility declared: validity must come from the static shape alone.
flexibilityIsRank1or3 = false;
break;
}
if (!flexibilityIsRank1or3 && !validShapeFound) {
return Result(ResultType::INVALID_MODEL_INTERFACE, "Input MLMultiArray to neural networks must have dimension 1 (vector) or 3 (image-like arrays).");
} else if (flexibilityIsRank1or3) {
validShapeFound = true;
}
// Defensive re-check; unreachable given the two branches above (both failure
// combinations have already returned), kept as a safeguard.
if (!validShapeFound) {
return Result(ResultType::INVALID_MODEL_INTERFACE, "Input MLMultiArray to neural networks must have dimension 1 (vector) or 3 (image-like arrays).");
}
} else { // validate input shape when "ndArrayInterpretation" is True
if (!(r = validateNdMultiArrayInputType(input.type().multiarraytype())).good()) {
return r;
}
} // if else block on spec version to check validity of input shape
}
}
// validate the Neural Network message
// create an object to validate neural network message
// NOTE(review): ioBlobNameToRank is passed twice -- presumably once as the
// running blob-rank map and once as the expected I/O ranks; confirm against
// the NeuralNetworkSpecValidator constructor.
NeuralNetworkSpecValidator validator(inputBlobs, ioBlobNameToRank, ndArrayInterpretation, 0, ioBlobNameToRank);
r = validator.validateNeuralNetwork(nn);
if (!r.good()) {
return r;
}
// gather all output blobs of the graph
for (auto& blob: validator.blobs){
if (inputBlobs.find(blob.first) == inputBlobs.end()) {
outputBlobNames.insert(blob.first);
} else {
// if we are here, this means this blob is also present in the set of "inputBlobs"
// but it can still be a genuine output blob if multiple layers are generating it (e.g. copy layer)
if (blob.second.size() > 1) {
outputBlobNames.insert(blob.first);
}
}
}
// Call the shaper: compatibility with iOS 12
// The shaper instance is constructed purely for its validation side effect
// (its constructor throws on inconsistent shapes) and then discarded.
if (!ndArrayInterpretation) {
// Compute the shapes
try {
NeuralNetworkShaper shaper(interface, nn.layers());
}
catch(std::runtime_error& e) {
std::string err = std::string("Error determining network blob shapes: ") + std::string(e.what());
return Result(ResultType::POTENTIALLY_INVALID_NEURAL_NETWORK_SHAPES, err);
}
}
// Redundant re-check: r has not been modified since the check above; harmless.
if (!r.good()) {
return r;
}
// Updatable models get two extra passes: the updatable-layer constraints and
// the training-inputs validation.
if (isUpdatable) {
r = validateUpdatableNeuralNetwork(nn);
if (!r.good()) { return r; }
r = validateTrainingInputs(interface, nn);
if (!r.good()) { return r; }
}
return r;
}