in model_optimizer_pkg/model_optimizer_pkg/model_optimizer_node.py [0:0]
def run_optimizer(self, mo_path, common_params, platform_parms):
    """Helper method that combines the common commands with the platform specific
    commands and runs Intel's model optimizer as a subprocess, with retries.

    Args:
        mo_path (str): Path to intel"s model optimizer for a given platform
            (mxnet, caffe, or tensor flow).
        common_params (dict): Dictionary containing the cli flags common to all
            model optimizer.
        platform_parms (dict): Dictionary containing the cli flags for the specific
            platform.

    Raises:
        Exception: Custom exception if the model file is not present.

    Returns:
        tuple: Tuple whose first value is the error code and second value
            is a string to the location of the converted model if any.
    """
    model_path = common_params[constants.MOKeys.MODEL_PATH]
    if not os.path.isfile(model_path):
        raise Exception(f"Model file {model_path} not found")
    # Build the argument vector as a list instead of round-tripping through a
    # shell-style string + shlex.split: flag values such as file paths may
    # contain spaces, which shlex.split would break into separate tokens and
    # silently corrupt the command.
    tokenized_cmd = shlex.split(f"{constants.PYTHON_BIN} {constants.INTEL_PATH}{mo_path}")
    for flag, value in dict(common_params, **platform_parms).items():
        tokenized_cmd.append(str(flag))
        tokenized_cmd.append(str(value))
    self.get_logger().info(f"Model optimizer command: {' '.join(tokenized_cmd)}")
    retry_count = 0
    # Retry running the optimizer if it fails due to any error.
    # The optimizer command is run for MAX_OPTIMIZER_RETRY_COUNT + 1 times.
    while retry_count <= constants.MAX_OPTIMIZER_RETRY_COUNT:
        self.get_logger().info(f"Optimizing model: {retry_count} of "
                               f"{constants.MAX_OPTIMIZER_RETRY_COUNT} trials")
        proc = subprocess.Popen(tokenized_cmd, stderr=subprocess.PIPE)
        _, std_err = proc.communicate()
        if not proc.returncode:
            # Success: the optimizer writes <model_name>.xml into OUT_DIR.
            return 0, os.path.join(common_params[constants.MOKeys.OUT_DIR],
                                   f"{common_params[constants.MOKeys.MODEL_NAME]}.xml")
        # Strip the optimizer's FAQ pointers (", question #N") from the
        # captured stderr before logging it.
        std_err = re.sub(r", question #\d+", "", std_err.decode("utf-8"))
        self.get_logger().error(f"Model optimizer error info: {std_err}")
        retry_count += 1
    # Return error code 1, which means that the model optimizer failed even after retries.
    return 1, ""