in coremltools/converters/mil/backend/mil/load.py [0:0]
def load(prog, weights_dir, resume_on_errors=False, **kwargs):
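    """
    Serialize a MIL ``Program`` into an ``mlprogram``-backed Core ML ``Model`` proto.

    prog: MIL program to serialize; it must contain a "main" function.
    weights_dir: directory into which the weight blob file is written.
    resume_on_errors: accepted for signature compatibility; not referenced in this function.
    kwargs: may contain "classifier_config" (a ClassifierConfig), in which case a
        "classify" op is appended to the program and the classifier fields of the
        model description are populated.

    Minimal usage sketch (assumes ``prog`` comes from a frontend converter and that
    the hypothetical directory below exists):

        model_proto = load(prog, weights_dir="/path/to/weights_dir")
    """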
if "main" not in prog.functions:
raise ValueError("main function not found in program")
mil_passes.mil_backend_passes(prog)
    # If the user has specified a ClassifierConfig, add the "classify" op to the prog.
classifier_config = kwargs.get("classifier_config", None)
predicted_feature_name = None
predicted_probabilities_name = None
if classifier_config is not None:
predicted_feature_name, predicted_probabilities_name = _add_classify_op(prog, classifier_config)
input_types = prog.main_input_types
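    # Weight data for the program is written to a blob file at weight_path; blob_writer
    # is passed to convert_function so converted ops can store their constant data there.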
weight_path = os.path.join(weights_dir, _WEIGHTS_FILE_NAME)
blob_writer = BlobWriter(weight_path)
function_protos = {}
for func_name, func in prog.functions.items():
function_protos[func_name] = convert_function(func, prog.parameters,
blob_writer)
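    # Assemble the converted functions into a single MIL Program proto.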
proto = pm.Program(
version=1,
functions=function_protos,
)
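    # Translate the MIL program's input/output interface into Core ML
    # FeatureDescriptions for the model description.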
input_features = []
output_features = []
symbolic_inputs = []
image_input_names = {} # these are the model inputs marked as image by the user
input_shape_map = {}
for input_type in input_types:
if isinstance(input_type, ImageType):
image_input_names[input_type.name] = input_type
# error checking for input(s) marked as images
if input_type.name not in list(prog.functions["main"].inputs.keys()):
msg = "Provided image input '{}' is not one of the inputs of the MIL program"
raise ValueError(msg.format(input_type.name))
input_shape_map[input_type.name] = input_type
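    # Build one FeatureDescription per input of the "main" function: inputs marked as
    # images become imageType features, everything else becomes a multiArrayType.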
for name, var in prog.functions["main"].inputs.items():
input_feature_type = ft.FeatureType()
# error checking for input(s) marked as images
# an image input must be of type tensor in program proto
# (since an image type does not exist in MIL program)
if name in image_input_names and \
not types.is_tensor(var.sym_type):
raise ValueError("For the image input, '{}', its type in the MIL program must be tensor. "
"Instead it is {}.".format(name, var.sym_type.__type_info__()))
if types.is_tensor(var.sym_type):
shape = var.sym_type.get_shape()
if any_variadic(shape):
raise ValueError("Variable rank model inputs are not supported!")
if any_symbolic(shape):
symbolic_inputs.append(name)
                # We extract the default input shape given by the user first
if name in input_shape_map:
shape = input_shape_map[name].shape.default
else:
logging.warning("Input shape not fully specified by enumerated shapes or range dim! 1 will be used for dimension not specified instead.")
# If no input shape is provided (ex. auto conversion of -1 in Tensorflow)
shape = [1 if is_symbolic(d) else d for d in shape]
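            # At this point every dimension in `shape` is concrete; the inputs whose
            # shapes were symbolic are recorded in `symbolic_inputs` and get flexible
            # shape ranges attached to the model further below.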
if name not in image_input_names:
# make a feature type of Type "multiArrayType"
array_type = ft.ArrayFeatureType(shape=shape, dataType=cast_to_framework_io_dtype(var, False))
input_feature_type.multiArrayType.CopyFrom(array_type)
else:
if len(shape) < 3:
raise ValueError("Image input, '{}', must have rank at least 3. Instead it has rank {}".
format(name, len(shape)))
# make a feature type of Type "imageType"
input_type = image_input_names[name]
if not input_type.channel_first:
raise ValueError("Image input, '{}', must be in the channel_first format".
format(name))
if input_type.color_layout == "G":
clr_space = ft.ImageFeatureType.ColorSpace.GRAYSCALE
elif input_type.color_layout == "BGR":
clr_space = ft.ImageFeatureType.ColorSpace.BGR
else:
clr_space = ft.ImageFeatureType.ColorSpace.RGB
image_type = ft.ImageFeatureType(width=shape[-1],
height=shape[-2],
colorSpace=clr_space)
input_feature_type.imageType.CopyFrom(image_type)
input_features.append(
ml.FeatureDescription(name=name, type=input_feature_type)
)
elif types.is_scalar(var.sym_type):
array_type = ft.ArrayFeatureType(shape=[1], dataType=cast_to_framework_io_dtype(var, False))
input_feature_type.multiArrayType.CopyFrom(array_type)
input_features.append(ml.FeatureDescription(name=var.name, type=input_feature_type))
else:
            raise NotImplementedError("Unsupported input type {} for input '{}'".format(var.sym_type, name))
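    # Build one FeatureDescription per output of the "main" function. Tensor and
    # scalar outputs become multiArrayType features (their shape is left unset);
    # dictionary outputs become dictionaryType features.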
for var in prog.functions["main"].outputs:
output_feature_type = ft.FeatureType()
if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
dataType = None
if classifier_config is None or var.name != predicted_feature_name:
                # Not a classifier output, so make sure the model output type matches the ML Program type.
dataType = cast_to_framework_io_dtype(var, True)
else:
# Classifier outputs are set up separately, so default to fp32 for now.
dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32
array_type = ft.ArrayFeatureType(shape=None, dataType=dataType)
output_feature_type.multiArrayType.CopyFrom(array_type)
output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type))
        elif types.is_dict(var.sym_type):
output_feature_type.dictionaryType.MergeFromString(b"")
keytype, valtype = var.sym_type.T
if types.is_str(keytype):
output_feature_type.dictionaryType.stringKeyType.MergeFromString(b"")
            elif keytype == types.int64:
output_feature_type.dictionaryType.int64KeyType.MergeFromString(b"")
else:
raise ValueError("Dictionary key type not supported.")
output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type))
else:
            raise NotImplementedError("Unsupported output type {} for output '{}'".format(var.sym_type, var.name))
# Model description
desc = ml.ModelDescription(input=input_features, output=output_features)
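    # For classifiers, point the description at the predicted-label output and the
    # class-probabilities output, and correct the predicted label's output type.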
if classifier_config is not None:
desc.predictedFeatureName = predicted_feature_name
desc.predictedProbabilitiesName = predicted_probabilities_name
# Manually edit output type of predictedFeatureName.
        # It doesn't use an MLMultiArray; the predicted label is a "primitive" (int64 or string) type instead.
for output in desc.output:
if output.name == predicted_feature_name:
if type(classifier_config.class_labels[0]) == int:
output.type.int64Type.MergeFromString(b"")
else:
output.type.stringType.MergeFromString(b"")
break
# Create ML Model
model = ml.Model(description=desc, specificationVersion=_SPECIFICATION_VERSION_IOS_15)
model.mlProgram.CopyFrom(proto)
# Set symbolic shapes
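    # Every input recorded in symbolic_inputs gets shape-flexibility metadata:
    # enumerated sizes or height/width ranges for image inputs, enumerated shapes or
    # per-dimension ranges for tensor inputs, and ranges derived from the program's
    # own symbolic shape when the user did not provide an input type.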
for input_name in symbolic_inputs:
input_type = input_shape_map.get(input_name, None)
if isinstance(input_type, ImageType):
if isinstance(input_type.shape, EnumeratedShapes):
enumerated_shapes = []
for s in input_type.shape.shapes:
enumerated_shapes.append(
NeuralNetworkImageSize(
height=s.shape[-2], width=s.shape[-1]
)
)
add_enumerated_image_sizes(
model, input_name, sizes=enumerated_shapes
)
else:
img_range = NeuralNetworkImageSizeRange()
H = input_type.shape.shape[-2]
W = input_type.shape.shape[-1]
if isinstance(H, RangeDim):
img_range.add_height_range((H.lower_bound, H.upper_bound))
elif is_symbolic(H):
img_range.add_height_range((1, -1))
else:
img_range.add_height_range((H, H))
if isinstance(W, RangeDim):
img_range.add_width_range((W.lower_bound, W.upper_bound))
elif is_symbolic(W):
img_range.add_width_range((1, -1))
else:
img_range.add_width_range((W, W))
update_image_size_range(
model, input_name, img_range
)
elif isinstance(input_type, TensorType):
if isinstance(input_type.shape, EnumeratedShapes):
add_multiarray_ndshape_enumeration(
model, input_name, [tuple(s.shape) for s in input_type.shape.shapes]
)
else:
lb = []
ub = []
for s in input_type.shape.shape:
if isinstance(s, RangeDim):
lb.append(s.lower_bound)
ub.append(s.upper_bound)
elif is_symbolic(s):
lb.append(1)
ub.append(-1)
else:
lb.append(s)
ub.append(s)
set_multiarray_ndshape_range(
model, input_name, lower_bounds=lb, upper_bounds=ub
)
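        # The user did not provide a type for this symbolic input, so derive
        # per-dimension bounds from the program itself, treating symbolic
        # dimensions as unbounded (lower bound 1, upper bound -1).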
elif input_type is None:
sym_type = prog.functions["main"].inputs[input_name].sym_type
lb = []
ub = []
for s in sym_type.get_shape():
if is_symbolic(s):
lb.append(1)
ub.append(-1)
else:
lb.append(s)
ub.append(s)
set_multiarray_ndshape_range(
model, input_name, lower_bounds=lb, upper_bounds=ub
)
# Set optional inputs
_set_optional_inputs(model, input_types)
return model