def on_get()

in tensorflow/inference/docker/build_artifacts/sagemaker_neuron/python_service.py
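
This Falcon GET handler reports model status. When no model name is given, it iterates over the model-to-port mapping in self._model_tfs_rest_port, queries each model's TensorFlow Serving REST endpoint at /v1/models/{model}, and returns the aggregated status as JSON. When a model name is given, it proxies the status request to that model's port, returning a 404 if the model is not loaded.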


    def on_get(self, req, res, model_name=None):  # pylint: disable=W0613
        if model_name is None:
            # No model name given: aggregate the status of every loaded model
            # by querying each model's dedicated TFS REST port.
            models_info = {}
            uri = "http://localhost:{}/v1/models/{}"
            for model, port in self._model_tfs_rest_port.items():
                try:
                    info = json.loads(requests.get(uri.format(port, model)).content)
                    models_info[model] = info
                except ValueError as e:
                    log.exception("exception handling request: {}".format(e))
                    res.status = falcon.HTTP_500
                    res.body = json.dumps({"error": str(e)}).encode("utf-8")
                    # Return early so the 500 is not overwritten below.
                    return
            res.status = falcon.HTTP_200
            res.body = json.dumps(models_info).encode("utf-8")
        else:
            if model_name not in self._model_tfs_rest_port:
                res.status = falcon.HTTP_404
                res.body = json.dumps(
                    {"error": "Model {} is loaded yet.".format(model_name)}
                ).encode("utf-8")
            else:
                # Proxy the status request to this model's TFS REST port.
                port = self._model_tfs_rest_port[model_name]
                uri = "http://localhost:{}/v1/models/{}".format(port, model_name)
                try:
                    # Decode the TFS response body; the raw Response object
                    # itself is not JSON-serializable.
                    info = json.loads(requests.get(uri).content)
                    res.status = falcon.HTTP_200
                    res.body = json.dumps({"model": info}).encode("utf-8")
                except ValueError as e:
                    log.exception("exception handling GET models request.")
                    res.status = falcon.HTTP_500
                    res.body = json.dumps({"error": str(e)}).encode("utf-8")
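
For orientation, here is a minimal client-side sketch of how these routes might be exercised once the container is serving. The base URL, port, route paths, and the model name my_model are assumptions for illustration; only the /v1/models/{name} URIs proxied inside the handler come from the code above.

    import requests

    # Minimal sketch: host, port, and route paths below are assumptions for
    # illustration, not taken from the handler itself.
    BASE = "http://localhost:8080"

    # GET with no model name: the handler aggregates the status of every
    # loaded model into one JSON object keyed by model name.
    all_models = requests.get("{}/models".format(BASE))
    print(all_models.status_code, all_models.json())

    # GET with a model name (my_model is a hypothetical example): proxied to
    # that model's TFS REST port; a 404 JSON error body means it is not loaded.
    one_model = requests.get("{}/models/my_model".format(BASE))
    print(one_model.status_code, one_model.json())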