tensorflow/inference/docker/build_artifacts/sagemaker/python_service.py [397:467]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        else:
            # Randomly pick port used for routing incoming request.
            grpc_port = self._pick_port(self._tfs_grpc_ports)
            rest_port = self._pick_port(self._tfs_rest_ports)
            data, context = tfs_utils.parse_request(
                req,
                rest_port,
                grpc_port,
                self._tfs_default_model_name,
                channel=self._channels[grpc_port],
            )

        try:
            res.status = falcon.HTTP_200
            handlers = self._handlers
            if SAGEMAKER_MULTI_MODEL_ENABLED and model_name in self.model_handlers:
                log.info(
                    "Model-specific inference script for the model {} exists, importing handlers.".format(
                        model_name
                    )
                )
                handlers = self.model_handlers[model_name]
            elif not self._default_handlers_enabled:
                log.info(
                    "Universal inference script exists at path {}, importing handlers.".format(
                        INFERENCE_SCRIPT_PATH
                    )
                )
            else:
                log.info(
                    "Model-specific inference script and universal inference script both do not exist, using default handlers."
                )
            res.body, res.content_type = handlers(data, context)
        except Exception as e:  # pylint: disable=broad-except
            log.exception("exception handling request: {}".format(e))
            res.status = falcon.HTTP_500
            res.body = json.dumps({"error": str(e)}).encode("utf-8")  # pylint: disable=E1101

    def _setup_channel(self, grpc_port):
        """Lazily create and cache an insecure gRPC channel for *grpc_port*.

        Channels are keyed by port in ``self._channels``; an existing entry
        is reused so repeated requests do not open duplicate connections.
        """
        if grpc_port in self._channels:
            return
        log.info("Creating grpc channel for port: %s", grpc_port)
        self._channels[grpc_port] = grpc.insecure_channel("localhost:{}".format(grpc_port))

    def _import_handlers(self, inference_script=INFERENCE_SCRIPT_PATH):
        """Load the user-supplied inference script and extract its handlers.

        The script must define either a single ``handler`` function, or the
        pair ``input_handler`` + ``output_handler``.

        :param inference_script: path to the Python file to load
        :return: tuple ``(handler, input_handler, output_handler)`` where the
                 entries not provided by the script are ``None``
        :raises NotImplementedError: if the script defines neither form
        """
        module_spec = importlib.util.spec_from_file_location("inference", inference_script)
        user_module = importlib.util.module_from_spec(module_spec)
        module_spec.loader.exec_module(user_module)

        # A combined "handler" takes precedence over the split pair.
        if hasattr(user_module, "handler"):
            return user_module.handler, None, None
        if hasattr(user_module, "input_handler") and hasattr(user_module, "output_handler"):
            return None, user_module.input_handler, user_module.output_handler
        raise NotImplementedError("Handlers are not implemented correctly in user script.")

    def _make_handler(self, custom_handler, custom_input_handler, custom_output_handler):
        if custom_handler:
            return custom_handler

        def handler(data, context):
            processed_input = custom_input_handler(data, context)
            response = requests.post(context.rest_uri, data=processed_input)
            return custom_output_handler(response, context)

        return handler

    def on_get(self, req, res, model_name=None):  # pylint: disable=W0613
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



tensorflow/inference/docker/build_artifacts/sagemaker_neuron/python_service.py [295:365]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        else:
            # Randomly pick port used for routing incoming request.
            grpc_port = self._pick_port(self._tfs_grpc_ports)
            rest_port = self._pick_port(self._tfs_rest_ports)
            data, context = tfs_utils.parse_request(
                req,
                rest_port,
                grpc_port,
                self._tfs_default_model_name,
                channel=self._channels[grpc_port],
            )

        try:
            res.status = falcon.HTTP_200
            handlers = self._handlers
            if SAGEMAKER_MULTI_MODEL_ENABLED and model_name in self.model_handlers:
                log.info(
                    "Model-specific inference script for the model {} exists, importing handlers.".format(
                        model_name
                    )
                )
                handlers = self.model_handlers[model_name]
            elif not self._default_handlers_enabled:
                log.info(
                    "Universal inference script exists at path {}, importing handlers.".format(
                        INFERENCE_SCRIPT_PATH
                    )
                )
            else:
                log.info(
                    "Model-specific inference script and universal inference script both do not exist, using default handlers."
                )
            res.body, res.content_type = handlers(data, context)
        except Exception as e:  # pylint: disable=broad-except
            log.exception("exception handling request: {}".format(e))
            res.status = falcon.HTTP_500
            res.body = json.dumps({"error": str(e)}).encode("utf-8")  # pylint: disable=E1101

    def _setup_channel(self, grpc_port):
        """Lazily create and cache an insecure gRPC channel for *grpc_port*.

        Channels are keyed by port in ``self._channels``; an existing entry
        is reused so repeated requests do not open duplicate connections.
        """
        if grpc_port in self._channels:
            return
        log.info("Creating grpc channel for port: %s", grpc_port)
        self._channels[grpc_port] = grpc.insecure_channel("localhost:{}".format(grpc_port))

    def _import_handlers(self, inference_script=INFERENCE_SCRIPT_PATH):
        """Load the user-supplied inference script and extract its handlers.

        The script must define either a single ``handler`` function, or the
        pair ``input_handler`` + ``output_handler``.

        :param inference_script: path to the Python file to load
        :return: tuple ``(handler, input_handler, output_handler)`` where the
                 entries not provided by the script are ``None``
        :raises NotImplementedError: if the script defines neither form
        """
        module_spec = importlib.util.spec_from_file_location("inference", inference_script)
        user_module = importlib.util.module_from_spec(module_spec)
        module_spec.loader.exec_module(user_module)

        # A combined "handler" takes precedence over the split pair.
        if hasattr(user_module, "handler"):
            return user_module.handler, None, None
        if hasattr(user_module, "input_handler") and hasattr(user_module, "output_handler"):
            return None, user_module.input_handler, user_module.output_handler
        raise NotImplementedError("Handlers are not implemented correctly in user script.")

    def _make_handler(self, custom_handler, custom_input_handler, custom_output_handler):
        if custom_handler:
            return custom_handler

        def handler(data, context):
            processed_input = custom_input_handler(data, context)
            response = requests.post(context.rest_uri, data=processed_input)
            return custom_output_handler(response, context)

        return handler

    def on_get(self, req, res, model_name=None):  # pylint: disable=W0613
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



