in source/neuropod/backends/torchscript/torch_backend.cc [235:273]
void TorchNeuropodBackend::load_model_internal()
{
    // Get the model from the neuropod
    auto graph_stream = loader_->get_istream_for_file("0/data/model.pt");

    // Custom ops
    // Make sure we don't load a custom op twice
    std::vector<std::string> custom_ops;
    for (const auto &item : model_config_->custom_ops)
    {
        const auto path = "0/ops/" + item;
        const auto hash = loader_->get_hash_for_file(path);

        // Don't load a custom op if we've already loaded it
        std::lock_guard<std::mutex> lock(loaded_op_mutex);
        if (loaded_op_hashes.count(hash) == 0)
        {
            custom_ops.emplace_back(loader_->get_file_path(path));
            loaded_op_hashes.insert(hash);
        }
    }

    model_ = load_model_from_path(*graph_stream,
                                  custom_ops,
                                  // Load the model onto the appropriate device (ideally a GPU if we have one available)
                                  // Note: this uses the options set in the initializer list above
                                  get_torch_device(DeviceType::GPU));

    if (!model_)
    {
        NEUROPOD_ERROR("Failed to load TorchScript graph for neuropod {}", neuropod_path_);
    }

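    // Keep a copy of the output tensor specs declared in the model config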
    for (const auto &tensor_spec : model_config_->outputs)
    {
        output_specs_.emplace_back(tensor_spec);
    }
}
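The dedup logic above relies on file-scope state that is outside this excerpt. A minimal sketch of what that state might look like, assuming it lives in an anonymous namespace in this translation unit (the names loaded_op_hashes and loaded_op_mutex come from the function; the types and placement are assumptions):

#include <mutex>
#include <string>
#include <unordered_set>

namespace
{
// Hashes of custom-op libraries that have already been loaded in this process
std::unordered_set<std::string> loaded_op_hashes;

// Guards loaded_op_hashes; multiple backend instances may load models concurrently
std::mutex loaded_op_mutex;
} // namespace

The collected custom_ops paths are then handed to load_model_from_path, which is expected to load each shared library before deserializing the TorchScript graph so the graph's custom operators resolve. A hedged illustration of that step (load_custom_ops_sketch is a hypothetical helper; the real backend's dlopen flags and error handling may differ):

#include <dlfcn.h>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical helper: load each custom-op shared library so its operators
// register with the torch runtime before the graph is deserialized
void load_custom_ops_sketch(const std::vector<std::string> &paths)
{
    for (const auto &path : paths)
    {
        if (dlopen(path.c_str(), RTLD_NOW) == nullptr)
        {
            throw std::runtime_error(std::string("Failed to load custom op ") + path + ": " + dlerror());
        }
    }
}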