// populate_default_backend_map — from source/neuropod/internal/backend_registration.cc [56:127]

void populate_default_backend_map(const std::string &neuropod_version, std::vector<BackendLoadSpec> &out)
{
    // A structure to store some basic info about frameworks we support
    struct FrameworkInfo
    {
        std::string              type;
        std::string              soname;
        bool                     has_gpu_version;
        std::vector<std::string> versions;
    };

    // Information about frameworks that we use to generate a list of `BackendLoadSpec`
    const std::vector<FrameworkInfo> frameworks = {
        {"torchscript",
         "libneuropod_torchscript_backend.so",
         true,
         {"1.1.0", "1.2.0", "1.3.0", "1.4.0", "1.5.0", "1.6.0", "1.7.0", "1.8.1", "1.9.0", "1.10.2"}},
        {"tensorflow",
         "libneuropod_tensorflow_backend.so",
         true,
         {"1.12.0", "1.13.1", "1.14.0", "1.15.0", "2.2.0", "2.5.0", "2.6.2"}},
        {"python", "libneuropod_pythonbridge_backend.so", false, {"2.7", "3.5", "3.6", "3.7", "3.8"}}};

    // Base directory for Neuropod backends
    std::string neuropod_base_dir = "/usr/local/lib/neuropod";
    if (auto base_dir = std::getenv("NEUROPOD_BASE_DIR"))
    {
        neuropod_base_dir = base_dir;
    }

    // Because the returned vector is in reverse priority order,
    // the ordering below means we'd prefer to load a newer, GPU capable version of a framework if one is available.
    // It also prioritizes non-absoltute so paths (i.e. controlled by LD_LIBRARY_PATH) over absolute ones in
    // `/usr/local/lib/neuropod`. This is so we don't break existing behavior.
    for (const auto &is_absolute_path : {false, true})
    {
        for (const auto &is_gpu : {false, true})
        {
            for (const auto &framework : frameworks)
            {
                if (is_gpu && !framework.has_gpu_version)
                {
                    // This framework doesn't have a GPU specific version
                    continue;
                }

                for (const auto &version : framework.versions)
                {
                    BackendLoadSpec item;
                    item.type    = framework.type;
                    item.version = version;

                    if (is_absolute_path)
                    {
                        // Ex:
                        //  "/usr/local/lib/neuropod/0.2.0/backends/torchscript_1.4.0/libneuropod_torchscript_backend.so"
                        // Ex:
                        //  "/usr/local/lib/neuropod/0.2.0/backends/torchscript_1.4.0_gpu/libneuropod_torchscript_backend.so"
                        item.path = neuropod_base_dir + ("/" + neuropod_version + "/backends/") + framework.type + "_" +
                                    version + (is_gpu ? "_gpu" : "") + "/" + framework.soname;
                    }
                    else
                    {
                        item.path = framework.soname;
                    }

                    out.emplace_back(std::move(item));
                }
            }
        }
    }
}