def _process_req()

in docker_images/diffusers/app/pipelines/image_to_image.py

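Normalizes an image-to-image request and dispatches it to whichever diffusers pipeline the app has loaded into self.ldm. Each branch adapts the call to that pipeline family's signature, fills in defaults (one image per prompt, an environment-configurable number of inference steps), and returns the first generated image.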

    def _process_req(self, image, prompt, **kwargs):
        # only one image per prompt is supported
        kwargs["num_images_per_prompt"] = 1
        if isinstance(
            self.ldm,
            (
                StableDiffusionPipeline,
                StableDiffusionImg2ImgPipeline,
                AltDiffusionPipeline,
                AltDiffusionImg2ImgPipeline,
                StableDiffusionControlNetPipeline,
                StableDiffusionInstructPix2PixPipeline,
                StableDiffusionUpscalePipeline,
                StableDiffusionLatentUpscalePipeline,
                StableDiffusionDepth2ImgPipeline,
            ),
        ):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
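            # these pipelines are called with the prompt first and the image second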
            images = self.ldm(prompt, image, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, StableDiffusionXLImg2ImgPipeline):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
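            # SDXL expects a 3-channel input, so drop any alpha channel first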
            image = image.convert("RGB")
            images = self.ldm(prompt, image=image, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, (StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline)):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
            # the unCLIP pipelines take the image first, then the prompt
            images = self.ldm(image, prompt, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, StableDiffusionImageVariationPipeline):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
            # only the image is needed; image-variation takes no text prompt
            images = self.ldm(image, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, KandinskyImg2ImgPipeline):
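            # Kandinsky uses a higher default step count (100) than the branches above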
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "100")
                )
            # Fall back to DEFAULT_GUIDANCE_SCALE from the environment when the
            # caller did not pass guidance_scale; otherwise leave the pipelines'
            # own defaults in place.
            if "guidance_scale" not in kwargs:
                default_guidance_scale = os.getenv("DEFAULT_GUIDANCE_SCALE")
                if default_guidance_scale is not None:
                    kwargs["guidance_scale"] = float(default_guidance_scale)
            # not all args are supported by the prior
            prior_args = {
                "num_inference_steps": kwargs["num_inference_steps"],
                "num_images_per_prompt": kwargs["num_images_per_prompt"],
                "negative_prompt": kwargs.get("negative_prompt", None),
                "guidance_scale": kwargs.get("guidance_scale", 7),
            }
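            # Run the prior to map the prompt to image embeddings; to_tuple()
            # yields (image_embeds, negative_image_embeds).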
            image_emb, zero_image_emb = self.prior(prompt, **prior_args).to_tuple()
            images = self.ldm(
                prompt,
                image=image,
                image_embeds=image_emb,
                negative_image_embeds=zero_image_emb,
                **kwargs,
            )["images"]
            return images[0]
        else:
            raise ValueError("Model type not found or pipeline not implemented")
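
For reference, a minimal sketch of a caller. It assumes the enclosing class is ImageToImagePipeline (as the file path suggests) and that its constructor takes a model id and loads the pipeline into self.ldm; the model id and file names below are placeholders, not part of the actual file:

    from PIL import Image

    from app.pipelines.image_to_image import ImageToImagePipeline

    # Hypothetical driver: build the app pipeline and serve one request.
    pipe = ImageToImagePipeline("runwayml/stable-diffusion-v1-5")
    src = Image.open("input.png").convert("RGB")

    # Extra kwargs such as strength or guidance_scale are forwarded to the
    # underlying diffusers pipeline; num_inference_steps falls back to the
    # DEFAULT_NUM_INFERENCE_STEPS environment variable (25 if unset).
    out = pipe._process_req(src, "a watercolor painting of a lighthouse", strength=0.6)
    out.save("output.png")

Because num_images_per_prompt is forced to 1, _process_req always returns a single PIL image.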