in tensorflow/inference/docker/build_artifacts/sagemaker/python_service.py [0:0]
def on_get(self, req, res, model_name=None):  # pylint: disable=W0613
    """Handle GET model-status requests by proxying the local TFS REST API.

    With ``model_name`` unset, aggregates ``/v1/models/<model>`` status for
    every model in ``self._mme_tfs_instances_status``; otherwise returns the
    status of the single named model (404 if it is not loaded).

    :param req: falcon request object (unused; required by the routing API)
    :param res: falcon response object — status/body are set in place
    :param model_name: optional model to query; ``None`` means "all models"
    """
    with lock():
        # Refresh the local view of model -> TFS instance mapping before
        # reading it, so we don't report stale load state.
        self._sync_local_mme_instance_status()
        if model_name is None:
            models_info = {}
            uri = "http://localhost:{}/v1/models/{}"
            for model, tfs_instance_status in self._mme_tfs_instances_status.items():
                try:
                    info = json.loads(
                        requests.get(
                            uri.format(tfs_instance_status[0].rest_port, model)
                        ).content
                    )
                    models_info[model] = info
                except ValueError as e:
                    log.exception("exception handling request: {}".format(e))
                    res.status = falcon.HTTP_500
                    res.body = json.dumps({"error": str(e)}).encode("utf-8")
                    # Bug fix: stop here — previously execution fell through
                    # and the HTTP 200 below overwrote this error response.
                    return
            res.status = falcon.HTTP_200
            res.body = json.dumps(models_info)
        else:
            if model_name not in self._mme_tfs_instances_status:
                res.status = falcon.HTTP_404
                # Bug fix: message previously read "is loaded yet." — this
                # branch fires when the model is NOT loaded.
                res.body = json.dumps(
                    {"error": "Model {} is not loaded yet.".format(model_name)}
                ).encode("utf-8")
            else:
                # Bug fix: dict values are sequences of instance-status
                # objects (see the [0] indexing in the all-models branch),
                # so the first instance must be selected before .rest_port.
                port = self._mme_tfs_instances_status[model_name][0].rest_port
                uri = "http://localhost:{}/v1/models/{}".format(port, model_name)
                try:
                    # Bug fix: previously the raw requests.Response object was
                    # placed in the body, which json.dumps cannot serialize
                    # (TypeError, uncaught by `except ValueError`). Parse the
                    # payload the same way the all-models branch does.
                    info = json.loads(requests.get(uri).content)
                    res.status = falcon.HTTP_200
                    res.body = json.dumps({"model": info}).encode("utf-8")
                except ValueError as e:
                    log.exception("exception handling GET models request.")
                    res.status = falcon.HTTP_500
                    res.body = json.dumps({"error": str(e)}).encode("utf-8")