in models/01_YoloV5/01_Pytorch/processing_cpp/include/dlrclient.hpp [93:120]
/// Runs one full inference pass: binds every input tensor, executes the
/// model, and fills `output` with one freshly-sized buffer per model output.
///
/// @param input  One float buffer per model input. Must contain exactly
///               `num_inputs` elements; element i must match the layout
///               described by `input_shapes[i]`.
/// @param output Must be empty on entry. On return it holds `num_outputs`
///               buffers, buffer i sized to `output_sizes[i]`.
/// @throws std::runtime_error if `output` is non-empty, if the input count
///         is wrong, or if any DLR runtime call fails.
inline void run_inference(const std::vector<std::vector<float>>& input, std::vector<std::vector<float>>& output) {
    if (!output.empty()) {
        throw std::runtime_error("You need to provide an empty output vector");
    }
    if (this->num_inputs != input.size()) {
        throw std::runtime_error("Invalid input elements. Expected: " + std::to_string(this->num_inputs) + " Received: " + std::to_string(input.size()));
    }
    // DLRGetLastError() may return a null pointer; constructing a
    // std::runtime_error from nullptr is undefined behavior, so guard it.
    auto throw_last_error = []() {
        const char* err = DLRGetLastError();
        throw std::runtime_error(err != nullptr ? err : "Unknown DLR error");
    };
    // Bind each input tensor by name with its declared shape.
    for (std::size_t i = 0; i < this->num_inputs; ++i) {
        const std::vector<int64_t>& shape = this->input_shapes[i];
        if (SetDLRInput(handle, this->input_names[i].c_str(), shape.data(), input[i].data(), shape.size()) != 0) {
            throw_last_error();
        }
    }
    if (RunDLRModel(handle) != 0) {
        throw_last_error();
    }
    // Allocate each output buffer in place (no push_back-then-resize churn)
    // and let the runtime copy the result into it.
    output.reserve(this->num_outputs);
    for (std::size_t i = 0; i < this->num_outputs; ++i) {
        output.emplace_back(this->output_sizes[i], 0.0f);
        if (GetDLROutput(handle, static_cast<int>(i), output.back().data()) != 0) {
            throw_last_error();
        }
    }
}