in model_optimizer_pkg/model_optimizer_pkg/model_optimizer_node.py [0:0]
def model_optimizer(self, req, res):
    """Callback for the model_optimizer_server service.

    Invokes the Intel OpenVINO model optimizer (via ``optimize_tf_model``)
    with parameters taken from the model details in the request.

    Args:
        req (ModelOptimizeSrv.Request): Request carrying the model details
            needed to run the optimizer (model name, sensor metadata,
            training algorithm, input width/height, lidar channel count
            and image format).
        res (ModelOptimizeSrv.Response): Response to populate with
            error (int) — flag indicating whether the optimizer script ran
            successfully — and artifact_path (str) — path where the
            intermediate representation xml files are created for the model.

    Returns:
        ModelOptimizeSrv.Response: The populated response object. On any
            exception, ``error`` is set to 1 and ``artifact_path`` is left
            at its default value.
    """
    self.get_logger().info("model_optimizer")
    try:
        # Fusing is always disabled; the image format is forwarded verbatim
        # from the request.
        aux_param = {"--fuse": "OFF", "--img-format": req.img_format}
        error_code, artifact_path = self.optimize_tf_model(req.model_name,
                                                           req.model_metadata_sensors,
                                                           req.training_algorithm,
                                                           req.width,
                                                           req.height,
                                                           req.lidar_channels,
                                                           aux_param)
        res.error = error_code
        res.artifact_path = artifact_path
    except Exception as ex:
        # Service boundary: report failure through the error flag rather
        # than letting the exception propagate into the ROS executor.
        res.error = 1
        self.get_logger().error(f"Error while optimizing model: {ex}")
    return res