tensorflow/inference/docker/build_artifacts/sagemaker/tfs_utils.py [28:119]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# NOTE(review): configures the root logger at import time — a module-level
# side effect that affects any importer's logging; verify this is intended.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

# Defaults applied when the request omits Content-Type / Accept headers.
DEFAULT_CONTENT_TYPE = "application/json"
DEFAULT_ACCEPT_HEADER = "application/json"
# SageMaker header carrying comma-separated "tfs-*" key=value attributes.
CUSTOM_ATTRIBUTES_HEADER = "X-Amzn-SageMaker-Custom-Attributes"

# Per-request invocation context handed to the inference handlers: model
# identity/routing (name, version, method, REST URI, gRPC port/channel)
# plus the raw request headers and content length.
Context = namedtuple(
    "Context",
    "model_name, model_version, method, rest_uri, grpc_port, channel, "
    "custom_attributes, request_content_type, accept_header, content_length",
)


def parse_request(req, rest_port, grpc_port, default_model_name, model_name=None, channel=None):
    """Extract the raw body and build an invocation ``Context`` from *req*.

    Returns:
        A ``(data, context)`` tuple: ``data`` is the request's raw stream and
        ``context`` carries the TFS routing details (model name/version,
        method, REST URI, gRPC port/channel) and the request headers.
    """
    tfs_attributes = parse_tfs_custom_attributes(req)
    tfs_uri = make_tfs_uri(rest_port, tfs_attributes, default_model_name, model_name)

    # An explicitly supplied model name wins; otherwise fall back to the
    # "tfs-model-name" custom attribute (may be absent -> None).
    resolved_name = model_name if model_name else tfs_attributes.get("tfs-model-name")

    context = Context(
        model_name=resolved_name,
        model_version=tfs_attributes.get("tfs-model-version"),
        method=tfs_attributes.get("tfs-method"),
        rest_uri=tfs_uri,
        grpc_port=grpc_port,
        channel=channel,
        custom_attributes=req.get_header(CUSTOM_ATTRIBUTES_HEADER),
        request_content_type=req.get_header("Content-Type") or DEFAULT_CONTENT_TYPE,
        accept_header=req.get_header("Accept") or DEFAULT_ACCEPT_HEADER,
        content_length=req.content_length,
    )
    return req.stream, context


def make_tfs_uri(port, attributes, default_model_name, model_name=None):
    """Build the TensorFlow Serving REST URI for a prediction request.

    Args:
        port: local TFS REST port.
        attributes: dict of parsed "tfs-*" custom attributes; may supply the
            model name, model version, and method.
        default_model_name: name used when neither *model_name* nor the
            attributes provide one.
        model_name: explicit model name; takes precedence over attributes.

    Returns:
        A URI of the form
        ``http://localhost:<port>/v1/models/<name>[/versions/<ver>]:<method>``.
    """
    # Lazy %-style logging args: formatting is skipped when INFO is disabled.
    log.info("sagemaker tfs attributes: \n%s", attributes)

    tfs_model_name = model_name or attributes.get("tfs-model-name", default_model_name)
    tfs_model_version = attributes.get("tfs-model-version")
    tfs_method = attributes.get("tfs-method", "predict")  # TFS default verb

    uri = "http://localhost:{}/v1/models/{}".format(port, tfs_model_name)
    if tfs_model_version:
        uri += "/versions/" + tfs_model_version
    uri += ":" + tfs_method
    return uri


def parse_tfs_custom_attributes(req):
    """Parse "tfs-*" key=value pairs from the SageMaker custom-attributes header.

    Returns:
        dict mapping attribute names (e.g. "tfs-model-name") to string values;
        empty when the header is absent or contains no tfs-* entries.
    """
    attributes = {}
    header = req.get_header(CUSTOM_ATTRIBUTES_HEADER)
    if header:
        matches = re.findall(r"(tfs-[a-z\-]+=[^,]+)", header)
        # Split on the FIRST '=' only: the regex allows '=' inside the value
        # (anything but a comma), and an unbounded split would then yield a
        # 3+-element sequence and make dict() raise ValueError.
        attributes = dict(attribute.split("=", 1) for attribute in matches)
    return attributes


def create_tfs_config_individual_model(model_name, base_path):
    """Render a TFS ``model_config_list`` proto-text block for one model.

    The served versions are pinned ("specific" version policy) to exactly the
    versions discovered under *base_path*.
    """
    lines = [
        "model_config_list: {",
        "  config: {",
        "    name: '{}'".format(model_name),
        "    base_path: '{}'".format(base_path),
        "    model_platform: 'tensorflow'",
        "    model_version_policy: {",
        "      specific: {",
    ]
    lines.extend(
        "        versions: {}".format(version)
        for version in find_model_versions(base_path)
    )
    lines.extend(["      }", "    }", "  }", "}"])
    return "\n".join(lines) + "\n"


def tfs_command(
    tfs_grpc_port,
    tfs_rest_port,
    tfs_config_path,
    tfs_enable_batching,
    tfs_batching_config_file,
    tfs_intra_op_parallelism=None,
    tfs_inter_op_parallelism=None,
    tfs_enable_gpu_memory_fraction=False,
    tfs_gpu_memory_fraction=None,
):
    cmd = (
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



tensorflow/inference/docker/build_artifacts/sagemaker_neuron/tfs_utils.py [27:118]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# NOTE(review): configures the root logger at import time — a module-level
# side effect that affects any importer's logging; verify this is intended.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

# Defaults applied when the request omits Content-Type / Accept headers.
DEFAULT_CONTENT_TYPE = "application/json"
DEFAULT_ACCEPT_HEADER = "application/json"
# SageMaker header carrying comma-separated "tfs-*" key=value attributes.
CUSTOM_ATTRIBUTES_HEADER = "X-Amzn-SageMaker-Custom-Attributes"

# Per-request invocation context handed to the inference handlers: model
# identity/routing (name, version, method, REST URI, gRPC port/channel)
# plus the raw request headers and content length.
Context = namedtuple(
    "Context",
    "model_name, model_version, method, rest_uri, grpc_port, channel, "
    "custom_attributes, request_content_type, accept_header, content_length",
)


def parse_request(req, rest_port, grpc_port, default_model_name, model_name=None, channel=None):
    """Extract the raw body and build an invocation ``Context`` from *req*.

    Returns:
        A ``(data, context)`` tuple: ``data`` is the request's raw stream and
        ``context`` carries the TFS routing details (model name/version,
        method, REST URI, gRPC port/channel) and the request headers.
    """
    tfs_attributes = parse_tfs_custom_attributes(req)
    tfs_uri = make_tfs_uri(rest_port, tfs_attributes, default_model_name, model_name)

    # An explicitly supplied model name wins; otherwise fall back to the
    # "tfs-model-name" custom attribute (may be absent -> None).
    resolved_name = model_name if model_name else tfs_attributes.get("tfs-model-name")

    context = Context(
        model_name=resolved_name,
        model_version=tfs_attributes.get("tfs-model-version"),
        method=tfs_attributes.get("tfs-method"),
        rest_uri=tfs_uri,
        grpc_port=grpc_port,
        channel=channel,
        custom_attributes=req.get_header(CUSTOM_ATTRIBUTES_HEADER),
        request_content_type=req.get_header("Content-Type") or DEFAULT_CONTENT_TYPE,
        accept_header=req.get_header("Accept") or DEFAULT_ACCEPT_HEADER,
        content_length=req.content_length,
    )
    return req.stream, context


def make_tfs_uri(port, attributes, default_model_name, model_name=None):
    """Build the TensorFlow Serving REST URI for a prediction request.

    Args:
        port: local TFS REST port.
        attributes: dict of parsed "tfs-*" custom attributes; may supply the
            model name, model version, and method.
        default_model_name: name used when neither *model_name* nor the
            attributes provide one.
        model_name: explicit model name; takes precedence over attributes.

    Returns:
        A URI of the form
        ``http://localhost:<port>/v1/models/<name>[/versions/<ver>]:<method>``.
    """
    # Lazy %-style logging args: formatting is skipped when INFO is disabled.
    log.info("sagemaker tfs attributes: \n%s", attributes)

    tfs_model_name = model_name or attributes.get("tfs-model-name", default_model_name)
    tfs_model_version = attributes.get("tfs-model-version")
    tfs_method = attributes.get("tfs-method", "predict")  # TFS default verb

    uri = "http://localhost:{}/v1/models/{}".format(port, tfs_model_name)
    if tfs_model_version:
        uri += "/versions/" + tfs_model_version
    uri += ":" + tfs_method
    return uri


def parse_tfs_custom_attributes(req):
    """Parse "tfs-*" key=value pairs from the SageMaker custom-attributes header.

    Returns:
        dict mapping attribute names (e.g. "tfs-model-name") to string values;
        empty when the header is absent or contains no tfs-* entries.
    """
    attributes = {}
    header = req.get_header(CUSTOM_ATTRIBUTES_HEADER)
    if header:
        matches = re.findall(r"(tfs-[a-z\-]+=[^,]+)", header)
        # Split on the FIRST '=' only: the regex allows '=' inside the value
        # (anything but a comma), and an unbounded split would then yield a
        # 3+-element sequence and make dict() raise ValueError.
        attributes = dict(attribute.split("=", 1) for attribute in matches)
    return attributes


def create_tfs_config_individual_model(model_name, base_path):
    """Render a TFS ``model_config_list`` proto-text block for one model.

    The served versions are pinned ("specific" version policy) to exactly the
    versions discovered under *base_path*.
    """
    lines = [
        "model_config_list: {",
        "  config: {",
        "    name: '{}'".format(model_name),
        "    base_path: '{}'".format(base_path),
        "    model_platform: 'tensorflow'",
        "    model_version_policy: {",
        "      specific: {",
    ]
    lines.extend(
        "        versions: {}".format(version)
        for version in find_model_versions(base_path)
    )
    lines.extend(["      }", "    }", "  }", "}"])
    return "\n".join(lines) + "\n"


def tfs_command(
    tfs_grpc_port,
    tfs_rest_port,
    tfs_config_path,
    tfs_enable_batching,
    tfs_batching_config_file,
    tfs_intra_op_parallelism=None,
    tfs_inter_op_parallelism=None,
    tfs_enable_gpu_memory_fraction=False,
    tfs_gpu_memory_fraction=None,
):
    cmd = (
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



