def _process_req()

in docker_images/latent-to-image/app/pipelines/latent_to_image.py


    def _process_req(self, inputs, **kwargs):
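        # Some fp16 VAEs (e.g. SDXL's) overflow to NaNs when decoding in half
        # precision; diffusers flags these with `config.force_upcast`, so decode
        # in float32 and cast back afterwards.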
        needs_upcasting = (
            self.vae.dtype == torch.float16 and self.vae.config.force_upcast
        )
        if needs_upcasting:
            self.vae = self.vae.to(torch.float32)
            inputs = inputs.to(self.device, torch.float32)
        else:
            inputs = inputs.to(self.device, self.dtype)

        # unscale/denormalize the latents, using the per-channel mean and std
        # when the VAE config provides them
        has_latents_mean = (
            hasattr(self.vae.config, "latents_mean")
            and self.vae.config.latents_mean is not None
        )
        has_latents_std = (
            hasattr(self.vae.config, "latents_std")
            and self.vae.config.latents_std is not None
        )
        if has_latents_mean and has_latents_std:
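            # per-channel statistics, reshaped to broadcast over
            # (batch, channel, height, width); SD-family VAEs use 4 latent channels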
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, 4, 1, 1)
                .to(inputs.device, inputs.dtype)
            )
            latents_std = (
                torch.tensor(self.vae.config.latents_std)
                .view(1, 4, 1, 1)
                .to(inputs.device, inputs.dtype)
            )
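            # invert the normalization z = (x - mean) * scaling_factor / std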
            inputs = (
                inputs * latents_std / self.vae.config.scaling_factor + latents_mean
            )
        else:
            inputs = inputs / self.vae.config.scaling_factor

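        # decode the latents to pixel space; inference only, so skip autograd
        # bookkeeping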
        with torch.no_grad():
            image = self.vae.decode(inputs, return_dict=False)[0]

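        # return the VAE to fp16 so it keeps its smaller memory footprint
        # between requests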
        if needs_upcasting:
            self.vae.to(dtype=torch.float16)

        image = self.image_processor.postprocess(image, output_type="pil")

        return image[0]
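
For orientation, here is a minimal sketch of driving this method outside the server. The SimpleNamespace stand-in, the checkpoint choice, and calling the method as a plain function are assumptions for illustration; AutoencoderKL and VaeImageProcessor are the diffusers classes that match the attributes the method reads (vae, image_processor, device, dtype).

    import torch
    from types import SimpleNamespace
    from diffusers import AutoencoderKL
    from diffusers.image_processor import VaeImageProcessor

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    # Any AutoencoderKL checkpoint works; this one is an assumption for the demo.
    vae = AutoencoderKL.from_pretrained(
        "stabilityai/sd-vae-ft-mse", torch_dtype=dtype
    ).to(device)

    # Hypothetical stand-in exposing only the attributes _process_req reads.
    ctx = SimpleNamespace(
        device=device,
        dtype=dtype,
        vae=vae,
        image_processor=VaeImageProcessor(
            vae_scale_factor=2 ** (len(vae.config.block_out_channels) - 1)
        ),
    )

    # SD latents have 4 channels at 1/8 of the target resolution (here 512x512).
    latents = torch.randn(1, 4, 64, 64)
    image = _process_req(ctx, latents)  # call the method above as a plain function
    image.save("decoded.png")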