tensorflow/inference/docker/build_artifacts/sagemaker/python_service.py [173:221]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                log.info("started tensorflow serving (pid: %d)", p.pid)
                # update model name <-> tfs pid map
                self._model_tfs_pid[model_name] = p

                res.status = falcon.HTTP_200
                res.body = json.dumps({
                    "success":
                        "Successfully loaded model {}, "
                        "listening on rest port {} "
                        "and grpc port {}.".format(model_name,
                                                   self._model_tfs_rest_port,
                                                   self._model_tfs_grpc_port,)
                })
            except MultiModelException as multi_model_exception:
                self._cleanup_config_file(tfs_config_file)
                self._cleanup_config_file(batching_config_file)
                if multi_model_exception.code == 409:
                    res.status = falcon.HTTP_409
                    res.body = multi_model_exception.msg
                elif multi_model_exception.code == 408:
                    res.status = falcon.HTTP_408
                    res.body = multi_model_exception.msg
                else:
                    raise MultiModelException(falcon.HTTP_500, multi_model_exception.msg)
            except FileExistsError as e:
                res.status = falcon.HTTP_409
                res.body = json.dumps({
                    "error": "Model {} is already loaded. {}".format(model_name, str(e))
                })
            except OSError as os_error:
                self._cleanup_config_file(tfs_config_file)
                self._cleanup_config_file(batching_config_file)
                if os_error.errno == 12:  # errno 12 == ENOMEM: TFS process could not be started
                    raise MultiModelException(falcon.HTTP_507,
                                              "Memory exhausted: "
                                              "not enough memory to start TFS instance")
                else:
                    raise MultiModelException(falcon.HTTP_500, os_error.strerror)
        else:
            res.status = falcon.HTTP_404
            res.body = json.dumps({
                "error":
                    "Could not find valid base path {} for servable {}".format(base_path,
                                                                               model_name)
            })

    def _cleanup_config_file(self, config_file):
        if os.path.exists(config_file):
            os.remove(config_file)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
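
The same block recurs verbatim in the sagemaker_neuron copy of python_service.py below. Purely as an illustration of how the duplicated error handling could be shared, here is a minimal sketch that folds the MultiModelException and OSError branches (including the config-file cleanup) into a single helper. The names _set_load_error_response and _cleanup_config_files, and the stubbed MultiModelException, are hypothetical and do not appear in either file; the sketch assumes falcon is installed.

import errno
import os

import falcon


class MultiModelException(Exception):
    """Illustrative stand-in for the exception type raised by the model-load path."""

    def __init__(self, code, msg):
        super().__init__(msg)
        self.code = code
        self.msg = msg


def _cleanup_config_files(*config_files):
    # Remove any generated TFS / batching config files left behind by a failed load.
    for config_file in config_files:
        if os.path.exists(config_file):
            os.remove(config_file)


def _set_load_error_response(res, exc, tfs_config_file, batching_config_file):
    """Map a failed model-load attempt onto the falcon response (hypothetical helper)."""
    _cleanup_config_files(tfs_config_file, batching_config_file)
    if isinstance(exc, MultiModelException) and exc.code in (408, 409):
        res.status = falcon.HTTP_409 if exc.code == 409 else falcon.HTTP_408
        res.body = exc.msg
    elif isinstance(exc, OSError) and exc.errno == errno.ENOMEM:
        raise MultiModelException(falcon.HTTP_507,
                                  "Memory exhausted: not enough memory to start TFS instance")
    else:
        raise MultiModelException(falcon.HTTP_500, getattr(exc, "msg", str(exc)))

Each handler could then call the helper from a single except clause instead of repeating the branches in both containers.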



tensorflow/inference/docker/build_artifacts/sagemaker_neuron/python_service.py [158:206]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                log.info("started tensorflow serving (pid: %d)", p.pid)
                # update model name <-> tfs pid map
                self._model_tfs_pid[model_name] = p

                res.status = falcon.HTTP_200
                res.body = json.dumps({
                    "success":
                        "Successfully loaded model {}, "
                        "listening on rest port {} "
                        "and grpc port {}.".format(model_name,
                                                   self._model_tfs_rest_port,
                                                   self._model_tfs_grpc_port,)
                })
            except MultiModelException as multi_model_exception:
                self._cleanup_config_file(tfs_config_file)
                self._cleanup_config_file(batching_config_file)
                if multi_model_exception.code == 409:
                    res.status = falcon.HTTP_409
                    res.body = multi_model_exception.msg
                elif multi_model_exception.code == 408:
                    res.status = falcon.HTTP_408
                    res.body = multi_model_exception.msg
                else:
                    raise MultiModelException(falcon.HTTP_500, multi_model_exception.msg)
            except FileExistsError as e:
                res.status = falcon.HTTP_409
                res.body = json.dumps({
                    "error": "Model {} is already loaded. {}".format(model_name, str(e))
                })
            except OSError as os_error:
                self._cleanup_config_file(tfs_config_file)
                self._cleanup_config_file(batching_config_file)
                if os_error.errno == 12:  # errno 12 == ENOMEM: TFS process could not be started
                    raise MultiModelException(falcon.HTTP_507,
                                              "Memory exhausted: "
                                              "not enough memory to start TFS instance")
                else:
                    raise MultiModelException(falcon.HTTP_500, os_error.strerror)
        else:
            res.status = falcon.HTTP_404
            res.body = json.dumps({
                "error":
                    "Could not find valid base path {} for servable {}".format(base_path,
                                                                               model_name)
            })

    def _cleanup_config_file(self, config_file):
        if os.path.exists(config_file):
            os.remove(config_file)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
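
For context on how these status codes surface to a caller, a hedged client-side sketch follows, assuming the standard SageMaker multi-model container contract of POST /models on the container's HTTP port; the host, port, model name, and model URL below are illustrative assumptions, not values taken from these files.

import requests

# Hypothetical load request against a locally running container; the endpoint,
# port, and payload values are assumptions for illustration only.
resp = requests.post(
    "http://localhost:8080/models",
    json={"model_name": "demo", "url": "/opt/ml/models/demo/model"},
)

if resp.status_code == 200:
    # Matches the "success" message built in the excerpts above.
    print(resp.json()["success"])
elif resp.status_code == 409:
    print("model already loaded or name in use")
elif resp.status_code == 507:
    print("container ran out of memory while starting a new TFS instance")
else:
    print("load failed:", resp.status_code, resp.text)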



