std::unique_ptr<ModelConfig> load_model_config(std::istream &input_stream)

in source/neuropod/internal/config_utils.cc [154:269]


std::unique_ptr<ModelConfig> load_model_config(std::istream &input_stream)
{
    // Parse it
    Json::CharReaderBuilder rbuilder;
    Json::Value             obj;

    std::string parse_err;
    bool        parsingSuccessful = Json::parseFromStream(rbuilder, input_stream, &obj, &parse_err);

    if (!parsingSuccessful)
    {
        throw_neuropod_config_error("Error parsing JSON: " + parse_err);
    }

    // Make sure that name and platform are strings
    if (!obj["name"].isString() || !obj["platform"].isString())
    {
        throw_neuropod_config_error("'name' and 'platform' must be strings.");
    }

    const std::string  name        = obj["name"].asString();
    const std::string  platform    = obj["platform"].asString();
    const Json::Value &input_spec  = obj["input_spec"];
    const Json::Value &output_spec = obj["output_spec"];

    // By default, any version is okay
    std::string platform_version_semver = "*";
    if (obj.isMember("platform_version_semver"))
    {
        platform_version_semver = obj["platform_version_semver"].asString();
    }

    // Make sure that it's a valid semver version or range
    if (!semver::valid(platform_version_semver))
    {
        throw_neuropod_config_error("The provided platform version (" + platform_version_semver +
                                    ") was not a valid semver version or range. See https://semver.org/ and "
                                    "https://docs.npmjs.com/misc/semver#ranges");
    }

    // Get the inputs
    std::vector<TensorSpec> inputs;
    for (const auto &spec : input_spec)
    {
        // Make sure name is a string
        if (!spec["name"].isString())
        {
            throw_neuropod_config_error("'name' must be a string.");
        }

        inputs.emplace_back(
            spec["name"].asString(), get_dims_from_json(spec["shape"]), convert_to_tensor_type(spec["dtype"]));
    }

    // Get the outputs
    std::vector<TensorSpec> outputs;
    for (const auto &spec : output_spec)
    {
        // Make sure name is a string
        if (!spec["name"].isString())
        {
            throw_neuropod_config_error("'name' must be a string.");
        }

        outputs.emplace_back(
            spec["name"].asString(), get_dims_from_json(spec["shape"]), convert_to_tensor_type(spec["dtype"]));
    }

    // Get the list of custom ops if any
    std::vector<std::string> custom_ops;
    if (obj.isMember("custom_ops"))
    {
        const Json::Value &items = obj["custom_ops"];
        for (const auto &item : items)
        {
            custom_ops.emplace_back(item.asString());
        }
    }

    // Load the device mapping if any
    std::unordered_map<std::string, NeuropodDeviceType> input_tensor_device;
    if (obj.isMember("input_tensor_device"))
    {
        const Json::Value &device_mapping = obj["input_tensor_device"];
        const auto         names          = device_mapping.getMemberNames();
        for (const auto &tensor_name : names)
        {
            const auto type = device_mapping[tensor_name].asString();
            if (type == "GPU")
            {
                input_tensor_device[tensor_name] = DeviceType::GPU;
            }
            else if (type == "CPU")
            {
                input_tensor_device[tensor_name] = DeviceType::CPU;
            }
            else
            {
                throw_neuropod_config_error("Invalid device type: " + type);
            }
        }
    }
    else
    {
        // Default all the tensors to GPU
        // TODO(vip): Remove this on version increase
        for (const auto &input : inputs)
        {
            input_tensor_device[input.name] = DeviceType::GPU;
        }
    }

    // Not directly using make_unique because of brace initialization
    return stdx::make_unique<ModelConfig>(
        ModelConfig{name, platform, platform_version_semver, inputs, outputs, custom_ops, input_tensor_device});
}