tensorflow/inference/docker/build_artifacts/sagemaker/python_service.py [108:168]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        lower, upper = port_range.split("-")
        lower = int(lower)
        upper = lower + int((int(upper) - lower) * 0.9)  # only utilize 90% of the port range
        rest_port = lower
        grpc_port = (lower + upper) // 2
        # Lower half of the usable range serves REST; upper half serves gRPC.
        tfs_ports = {
            "rest_port": list(range(rest_port, grpc_port)),
            "grpc_port": list(range(grpc_port, upper)),
        }
        return tfs_ports

    def _ports_available(self):
        with lock():
            rest_ports = self._tfs_ports["rest_port"]
            grpc_ports = self._tfs_ports["grpc_port"]
        return len(rest_ports) > 0 and len(grpc_ports) > 0

    def _handle_load_model_post(self, res, data):  # noqa: C901
        model_name = data["model_name"]
        base_path = data["url"]

        # model is already loaded; reply 409 and bail out instead of falling through
        if model_name in self._model_tfs_pid:
            res.status = falcon.HTTP_409
            res.body = json.dumps({
                "error": "Model {} is already loaded.".format(model_name)
            })
            return

        # check if there are available ports; without the early return the
        # pop() calls below would raise IndexError on an exhausted port list
        if not self._ports_available():
            res.status = falcon.HTTP_507
            res.body = json.dumps({
                "error": "Memory exhausted: no available ports to load the model."
            })
            return

        # reserve one REST port and one gRPC port for this model
        with lock():
            self._model_tfs_rest_port[model_name] = self._tfs_ports["rest_port"].pop()
            self._model_tfs_grpc_port[model_name] = self._tfs_ports["grpc_port"].pop()

        # validate model files are in the specified base_path
        if self.validate_model_dir(base_path):
            try:
                tfs_config = tfs_utils.create_tfs_config_individual_model(model_name, base_path)
                tfs_config_file = "/sagemaker/tfs-config/{}/model-config.cfg".format(model_name)
                log.info("tensorflow serving model config: \n%s\n", tfs_config)
                os.makedirs(os.path.dirname(tfs_config_file), exist_ok=True)
                with open(tfs_config_file, "w") as f:
                    f.write(tfs_config)

                batching_config_file = "/sagemaker/batching/{}/batching-config.cfg".format(
                    model_name)
                if self._tfs_enable_batching:
                    tfs_utils.create_batching_config(batching_config_file)

                cmd = tfs_utils.tfs_command(
                    self._model_tfs_grpc_port[model_name],
                    self._model_tfs_rest_port[model_name],
                    tfs_config_file,
                    self._tfs_enable_batching,
                    batching_config_file,
                )
                # launch a dedicated TensorFlow Serving process for this model
                p = subprocess.Popen(cmd.split())
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
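
A standalone worked example of the duplicated port-split arithmetic above. The "1000-2000" range is hypothetical, chosen only to make the numbers easy to follow; it is not a value taken from either python_service.py.

    # Hypothetical sketch of the port-split logic above; "1000-2000" is an
    # illustrative range, not a value from either container's configuration.
    port_range = "1000-2000"

    lower, upper = port_range.split("-")
    lower = int(lower)
    upper = lower + int((int(upper) - lower) * 0.9)  # 1000 + 900 = 1900
    grpc_port = (lower + upper) // 2                 # (1000 + 1900) // 2 = 1450

    rest_ports = list(range(lower, grpc_port))  # 1000..1449 -> 450 REST ports
    grpc_ports = list(range(grpc_port, upper))  # 1450..1899 -> 450 gRPC ports
    assert len(rest_ports) == len(grpc_ports) == 450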



tensorflow/inference/docker/build_artifacts/sagemaker_neuron/python_service.py [95:155]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        lower, upper = port_range.split("-")
        lower = int(lower)
        upper = lower + int((int(upper) - lower) * 0.9)  # only utilize 90% of the port range
        rest_port = lower
        grpc_port = (lower + upper) // 2
        # Lower half of the usable range serves REST; upper half serves gRPC.
        tfs_ports = {
            "rest_port": list(range(rest_port, grpc_port)),
            "grpc_port": list(range(grpc_port, upper)),
        }
        return tfs_ports

    def _ports_available(self):
        with lock():
            rest_ports = self._tfs_ports["rest_port"]
            grpc_ports = self._tfs_ports["grpc_port"]
        return len(rest_ports) > 0 and len(grpc_ports) > 0

    def _handle_load_model_post(self, res, data):  # noqa: C901
        model_name = data["model_name"]
        base_path = data["url"]

        # model is already loaded; reply 409 and bail out instead of falling through
        if model_name in self._model_tfs_pid:
            res.status = falcon.HTTP_409
            res.body = json.dumps({
                "error": "Model {} is already loaded.".format(model_name)
            })
            return

        # check if there are available ports; without the early return the
        # pop() calls below would raise IndexError on an exhausted port list
        if not self._ports_available():
            res.status = falcon.HTTP_507
            res.body = json.dumps({
                "error": "Memory exhausted: no available ports to load the model."
            })
            return

        # reserve one REST port and one gRPC port for this model
        with lock():
            self._model_tfs_rest_port[model_name] = self._tfs_ports["rest_port"].pop()
            self._model_tfs_grpc_port[model_name] = self._tfs_ports["grpc_port"].pop()

        # validate model files are in the specified base_path
        if self.validate_model_dir(base_path):
            try:
                tfs_config = tfs_utils.create_tfs_config_individual_model(model_name, base_path)
                tfs_config_file = "/sagemaker/tfs-config/{}/model-config.cfg".format(model_name)
                log.info("tensorflow serving model config: \n%s\n", tfs_config)
                os.makedirs(os.path.dirname(tfs_config_file), exist_ok=True)
                with open(tfs_config_file, "w") as f:
                    f.write(tfs_config)

                batching_config_file = "/sagemaker/batching/{}/batching-config.cfg".format(
                    model_name)
                if self._tfs_enable_batching:
                    tfs_utils.create_batching_config(batching_config_file)

                cmd = tfs_utils.tfs_command(
                    self._model_tfs_grpc_port[model_name],
                    self._model_tfs_rest_port[model_name],
                    tfs_config_file,
                    self._tfs_enable_batching,
                    batching_config_file,
                )
                # launch a dedicated TensorFlow Serving process for this model
                p = subprocess.Popen(cmd.split())
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
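
For context, a minimal client-side sketch of the load request this duplicated handler consumes. Only the "model_name" and "url" payload fields come from the handler above; the host, port, and "/models" route are assumptions about how the handler is mounted, and the model name and base path are hypothetical.

    # Hypothetical client for the load-model handler above. The endpoint URL
    # is an assumption; only the payload fields appear in the handler itself.
    import json
    import requests  # assumed to be available in the client environment

    payload = {
        "model_name": "half_plus_three",                # hypothetical name
        "url": "/opt/ml/models/half_plus_three/model",  # hypothetical base_path
    }
    response = requests.post("http://localhost:8080/models", data=json.dumps(payload))

    # Per the handler: 409 if the model is already loaded,
    # 507 if no REST/gRPC port pair is free.
    print(response.status_code, response.text)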



