in tensorflow/inference/docker/build_artifacts/sagemaker_neuron/python_service.py [0:0]
def _handle_invocation_post(self, req, res, model_name=None):
    if SAGEMAKER_MULTI_MODEL_ENABLED:
        if model_name:
            if model_name not in self._model_tfs_rest_port:
                res.status = falcon.HTTP_404
                res.body = json.dumps({
                    "error": "Model {} is not loaded yet.".format(model_name)
                })
                return
            else:
                # The model is loaded: look up the TFS REST/gRPC ports that
                # were assigned to it and parse the request against them.
                log.info("model name: {}".format(model_name))
                rest_port = self._model_tfs_rest_port[model_name]
                log.info("rest port: {}".format(rest_port))
                grpc_port = self._model_tfs_grpc_port[model_name]
                log.info("grpc port: {}".format(grpc_port))
                data, context = tfs_utils.parse_request(req, rest_port, grpc_port,
                                                        self._tfs_default_model_name,
                                                        model_name)
        else:
            res.status = falcon.HTTP_400
            res.body = json.dumps({
                "error": "Invocation request does not contain model name."
            })
            # Without this return, execution would fall through to the
            # try block below and fail on undefined `data`/`context`.
            return
    else:
        # Single-model mode: one TFS instance listening on fixed ports.
        data, context = tfs_utils.parse_request(req, self._tfs_rest_port, self._tfs_grpc_port,
                                                self._tfs_default_model_name)

    try:
        res.status = falcon.HTTP_200
        res.body, res.content_type = self._handlers(data, context)
    except Exception as e:  # pylint: disable=broad-except
        log.exception("exception handling request: {}".format(e))
        res.status = falcon.HTTP_500
        res.body = json.dumps({
            "error": str(e)
        }).encode("utf-8")  # pylint: disable=E1101
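
For orientation, below is a minimal sketch of how a responder like _handle_invocation_post is typically exposed through Falcon 2.x routing. The class name, route paths, and handler body are illustrative assumptions for this sketch, not definitions taken from this file.

# Minimal sketch, assuming Falcon 2.x (where res.body is still the response
# attribute, as in the handler above). Names and routes are hypothetical.
import json

import falcon


class InvocationResource:
    """Hypothetical resource that delegates POST requests to a handler method."""

    def on_post(self, req, res, model_name=None):
        # Falcon invokes on_post for POST requests; the handler fills in
        # res.status / res.body, just as _handle_invocation_post does.
        self._handle_invocation_post(req, res, model_name)

    def _handle_invocation_post(self, req, res, model_name=None):
        # Stand-in body for illustration only.
        res.status = falcon.HTTP_200
        res.body = json.dumps({"model": model_name or "default"})


app = falcon.API()
resource = InvocationResource()
# Single-model and multi-model style routes (paths are assumptions).
app.add_route("/invocations", resource)
app.add_route("/models/{model_name}/invoke", resource)

Falcon passes the {model_name} URI field to on_post as a keyword argument, which is why a handler in this style accepts model_name=None and treats a missing name as the single-model path.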