in inference_pkg/src/intel_inference_eng.cpp [203:233]
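loadModel refuses to swap models while inference is running, binds the image pre-processing algorithm, loads the multi-head model onto the CPU via setMultiHeadModel, and caches each input head's width/height/channel geometry for later pre-processing.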
bool RLInferenceModel::loadModel(const char* artifactPath,
                                 std::shared_ptr<InferTask::ImgProcessBase> imgProcess) {
    if (doInference_) {
        RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model");
        return false;
    }
    if (!imgProcess) {
        RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid image processing algorithm");
        return false;
    }
    // Set the image processing algorithm
    imgProcess_ = imgProcess;
    // Load the model onto the CPU device and create the inference request
    try {
        inferRequest_ = setMultiHeadModel(artifactPath, "CPU", core_, inputNamesArr_,
                                          outputName_, InferenceEngine::Precision::FP32,
                                          InferenceEngine::Precision::FP32, inferenceNode);
        // Cache each input head's geometry. InferenceEngine blob dims are in
        // NCHW order, so dims[1] = channels, dims[2] = height, dims[3] = width.
        for (size_t i = 0; i != inputNamesArr_.size(); ++i) {
            auto input = inferRequest_.GetBlob(inputNamesArr_[i]);
            const auto &dims = input->getTensorDesc().getDims();
            std::unordered_map<std::string, int> params_ = {{"width", static_cast<int>(dims[3])},
                                                            {"height", static_cast<int>(dims[2])},
                                                            {"channels", static_cast<int>(dims[1])}};
            paramsArr_.push_back(params_);
        }
    }
    catch (const std::exception &ex) {
        RCLCPP_ERROR(inferenceNode->get_logger(), "Model failed to load: %s", ex.what());
        return false;
    }
    return true;
}
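setMultiHeadModel is a helper defined elsewhere in this file; only its call site appears above. For orientation, here is a minimal sketch of what such a helper could look like with the classic (pre-OpenVINO 2.0) InferenceEngine API. The signature is inferred from the call site, and the body is an assumption built on the standard Core::ReadNetwork / LoadNetwork / CreateInferRequest flow, not the repository's actual implementation.

#include <inference_engine.hpp>
#include <rclcpp/rclcpp.hpp>
#include <string>
#include <vector>

// Illustrative sketch only -- signature inferred from the call site above,
// body assumed from the standard InferenceEngine model-loading flow.
InferenceEngine::InferRequest setMultiHeadModel(const char* artifactPath,
                                                const std::string &device,
                                                InferenceEngine::Core &core,
                                                std::vector<std::string> &inputNamesArr,
                                                std::string &outputName,
                                                InferenceEngine::Precision inputPrec,
                                                InferenceEngine::Precision outputPrec,
                                                rclcpp::Node::SharedPtr node) {
    (void)node;  // available for RCLCPP logging; unused in this sketch
    // Read the IR artifact (the .xml file plus its sibling .bin weights).
    auto network = core.ReadNetwork(artifactPath);
    // Register every input head and force the requested precision on each.
    for (auto &item : network.getInputsInfo()) {
        item.second->setPrecision(inputPrec);
        inputNamesArr.push_back(item.first);
    }
    // A single output is assumed: record its name and set its precision.
    for (auto &item : network.getOutputsInfo()) {
        item.second->setPrecision(outputPrec);
        outputName = item.first;
    }
    // Compile the network for the target device and return a ready request.
    auto execNetwork = core.LoadNetwork(network, device);
    return execNetwork.CreateInferRequest();
}

With a helper along these lines, the GetBlob calls in the loop above can resolve each name in inputNamesArr_ because the helper populated that vector while walking the network's inputs.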