def preprocess()

in community-content/cpr-examples/timm_serving/timm_serving/predictor.py


    def preprocess(self, request_dict: Dict[str, List[str]]) -> torch.Tensor:
        """Performs preprocessing.

        By default, the server expects a request body consisting of a valid JSON
        object. This will be parsed by the handler before it's evaluated by the
        preprocess method.

        Args:
          request_dict: Parsed request body. We expect that the input consists of
            a list of base64-encoded image files under the "instances" key. (Any
            image format that PIL.Image.open can handle is okay.)

        Returns:
          torch.Tensor containing the preprocessed images as a batch. If a GPU
          is available, the resulting tensor is placed on the GPU.
        """

        if "instances" not in request_dict:
            raise HTTPException(
                status_code=400,
                detail='Request must contain "instances" as a top-level key.',
            )

        tensors = []

        for i, image in enumerate(request_dict["instances"]):
            # We use Base64 encoding to handle image data.
            # This is probably the best we can do while still using JSON input.
            # Overriding the input format requires building a custom Handler.
            try:
                image_bytes = base64.b64decode(image, validate=True)
            except (binascii.Error, TypeError) as e:
                raise HTTPException(
                    status_code=400,
                    detail=f"Base64 decoding of the input image at index {i} failed:"
                    f" {str(e)}",
                )

            try:
                pil_image = PIL.Image.open(io.BytesIO(image_bytes)).convert("RGB")
            except PIL.UnidentifiedImageError:
                raise HTTPException(
                    status_code=400,
                    detail=f"The input image at index {i} could not be identified as an"
                    " image file.",
                )

            tensors.append(self._transform(pil_image))

        # Stack the per-image tensors into a single batch; move it to the GPU
        # when one is available.
        with torch.inference_mode():
            result = torch.stack(tensors)
            if self._cuda:
                result = result.cuda()
        return result
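
For reference, a client can build a request body that this method accepts by base64-encoding each image file and placing the resulting strings under the "instances" key. The snippet below is a minimal illustration; the file names are placeholders, and the exact predict URL depends on how the CPR container is deployed.

import base64
import json

image_paths = ["cat.jpg", "dog.png"]  # placeholder file names

instances = []
for path in image_paths:
    with open(path, "rb") as f:
        # Each image file is sent as a base64-encoded string; any format
        # that PIL.Image.open understands (JPEG, PNG, ...) works.
        instances.append(base64.b64encode(f.read()).decode("utf-8"))

# JSON body expected by preprocess(): a list of base64 strings under the
# top-level "instances" key.
request_body = json.dumps({"instances": instances})

A body without the "instances" key, or with strings that are not valid base64, is rejected with the HTTP 400 errors raised above.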