in inference_pkg/src/inference_node.cpp [81:107]
/// Handler for the InferenceStateSrv service: starts or stops the inference
/// task identified by req->task_type. Responds with error = 0 on success and
/// error = 1 if the task type is unknown or no model has been loaded for it.
void InferStateHdl(const std::shared_ptr<rmw_request_id_t> request_header,
                   std::shared_ptr<deepracer_interfaces_pkg::srv::InferenceStateSrv::Request> req,
                   std::shared_ptr<deepracer_interfaces_pkg::srv::InferenceStateSrv::Response> res) {
    (void)request_header;
    auto itInferTask = taskList_.find(req->task_type);
    // Default to failure; cleared only after a successful start/stop.
    res->error = 1;
    if (itInferTask != taskList_.end()) {
        if (!itInferTask->second) {
            // No task object exists for this task type, i.e. no model has been
            // loaded yet. Keep res->error = 1 so the caller sees the failure.
            RCLCPP_ERROR(this->get_logger(), "Please load a model before starting inference");
            return;
        }
        if (req->start) {
            itInferTask->second->startInference();
            RCLCPP_INFO(this->get_logger(), "Inference task (enum %d) has started", req->task_type);
        }
        else {
            itInferTask->second->stopInference();
            RCLCPP_INFO(this->get_logger(), "Inference task (enum %d) has stopped", req->task_type);
        }
        res->error = 0;
    }
}
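
For context, a minimal client sketch follows. The service name "inference_state" and the task-type value 0 are assumptions for illustration (the listing above does not show the create_service registration or the task-type enum); only the request/response fields task_type, start, and error come from the handler itself.

#include <chrono>
#include <memory>

#include "rclcpp/rclcpp.hpp"
#include "deepracer_interfaces_pkg/srv/inference_state_srv.hpp"

int main(int argc, char **argv) {
    rclcpp::init(argc, argv);
    auto node = rclcpp::Node::make_shared("inference_state_client");
    // "inference_state" is an assumed service name; check the node's
    // create_service call for the real one.
    auto client = node->create_client<deepracer_interfaces_pkg::srv::InferenceStateSrv>("inference_state");
    if (!client->wait_for_service(std::chrono::seconds(2))) {
        RCLCPP_ERROR(node->get_logger(), "Service not available");
        rclcpp::shutdown();
        return 1;
    }

    auto req = std::make_shared<deepracer_interfaces_pkg::srv::InferenceStateSrv::Request>();
    req->task_type = 0;   // Hypothetical task-type enum value.
    req->start = true;    // true -> startInference(), false -> stopInference().

    auto result = client->async_send_request(req);
    if (rclcpp::spin_until_future_complete(node, result) == rclcpp::FutureReturnCode::SUCCESS) {
        // error == 0 signals success; 1 means an unknown task type or that
        // no model has been loaded for it.
        RCLCPP_INFO(node->get_logger(), "error=%d", result.get()->error);
    }
    rclcpp::shutdown();
    return 0;
}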