bool Executable::Call()

in ngraph_bridge/executable.cc [151:238]


bool Executable::Call(const vector<shared_ptr<runtime::Tensor>>& inputs,
                      vector<shared_ptr<runtime::Tensor>>& outputs) {
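  // Fast path: simple functions are evaluated directly on the host via
  // CallTrivial() instead of going through an IE inference request.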
  if (m_trivial_fn) {
    NGRAPH_VLOG(2) << "Calling trivial IE function with inputs="
                   << inputs.size() << " outputs=" << outputs.size();
    return CallTrivial(inputs, outputs);
  }

  // Check that the number of inputs the CNN network expects does not exceed
  // the sum of the inputs specified and the inputs we hoisted, if any.
  InferenceEngine::InputsDataMap input_info = m_network.getInputsInfo();
  if (input_info.size() > (inputs.size() + m_hoisted_params.size())) {
    throw runtime_error("Function inputs (" + to_string(input_info.size()) +
                        ") number greater than number of given inputs (" +
                        to_string(inputs.size() + m_hoisted_params.size()) +
                        ")");
  }

  // Prepare input blobs
  auto func = m_network.getFunction();
  auto parameters = func->get_parameters();
  int j = 0;
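  // j indexes the function parameters and advances only for inputs that are
  // not skipped, keeping the caller's inputs aligned with the parameters.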
  for (size_t i = 0; i < inputs.size(); i++) {
    if (find(m_skipped_inputs.begin(), m_skipped_inputs.end(), i) !=
        m_skipped_inputs.end()) {
      continue;
    }
    auto input_name = parameters[j++]->get_friendly_name();
    if (input_info.find(input_name) == input_info.end()) {
      NGRAPH_VLOG(1) << "Skipping unused input " << input_name;
      continue;
    }
    shared_ptr<IETensor> tv = static_pointer_cast<IETensor>(inputs[i]);
    m_infer_req.SetBlob(input_name, tv->get_blob());
  }
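  // Bind blobs for the hoisted parameters the same way; parameters the
  // network no longer uses are skipped.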

  for (const auto& it : m_hoisted_params) {
    auto input_name = it.first;
    if (input_info.find(input_name) == input_info.end()) {
      NGRAPH_VLOG(1) << "Skipping unused hoisted param " << input_name;
      continue;
    }
    shared_ptr<IETensor> tv = static_pointer_cast<IETensor>(it.second);
    m_infer_req.SetBlob(input_name, tv->get_blob());
  }

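  // If the caller passed no output tensors, size the vector to the network's
  // output count; nullptr slots are filled from IE after inference below.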
  InferenceEngine::OutputsDataMap output_info = m_network.getOutputsInfo();
  if (outputs.size() == 0 && output_info.size() > 0) {
    outputs.resize(output_info.size(), nullptr);
  }

  auto get_output_name = [](std::shared_ptr<ngraph::Node> node) {
    // Since IE has no "result" nodes, we set the blob corresponding to the
    // parent of this result node
    auto parent = node->input_value(0).get_node_shared_ptr();
    auto name = parent->get_friendly_name();
    // if parent has multiple outputs, correctly identify the output feeding
    // into this result
    if (parent->outputs().size() > 1) {
      name += "." + to_string(node->input_value(0).get_index());
    }
    return name;
  };

  // Prepare output blobs
  auto results = func->get_results();
  for (size_t i = 0; i < results.size(); i++) {
    if (outputs[i] != nullptr) {
      NGRAPH_VLOG(4) << "Executable::Call() SetBlob()";
      shared_ptr<IETensor> tv = static_pointer_cast<IETensor>(outputs[i]);
      m_infer_req.SetBlob(get_output_name(results[i]), tv->get_blob());
    }
  }

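  // Run the inference request synchronously.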
  m_infer_req.Infer();

  // Set dynamic output blobs
  for (size_t i = 0; i < results.size(); i++) {
    if (outputs[i] == nullptr) {
      NGRAPH_VLOG(4) << "Executable::Call() GetBlob()";
      auto blob = m_infer_req.GetBlob(get_output_name(results[i]));
      outputs[i] = make_shared<IETensor>(blob);
    }
  }

  return true;
}
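
For context, a minimal sketch of a caller, assuming an IETensor constructor that takes an element type and shape (the actual signature lives in ie_tensor.h and may differ):

// Hypothetical usage sketch; the IETensor constructor is an assumption.
vector<shared_ptr<runtime::Tensor>> inputs{
    make_shared<IETensor>(element::f32, Shape{1, 224, 224, 3})};
vector<shared_ptr<runtime::Tensor>> outputs;  // empty: let Call() allocate
executable->Call(inputs, outputs);
// Each outputs[i] now wraps the blob fetched from the inference request.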