def _process_req()

in docker_images/diffusers/app/pipelines/text_to_image.py [0:0]


    def _process_req(self, inputs, **kwargs):
        # only one image per prompt is supported
        kwargs["num_images_per_prompt"] = 1

        if "num_inference_steps" not in kwargs:
            default_num_steps = os.getenv("DEFAULT_NUM_INFERENCE_STEPS")
            if default_num_steps:
                kwargs["num_inference_steps"] = int(default_num_steps)
            elif self.is_karras_compatible:
                kwargs["num_inference_steps"] = 20
            # Else, don't specify anything, leave the default behaviour

        if "guidance_scale" not in kwargs:
            default_guidance_scale = os.getenv("DEFAULT_GUIDANCE_SCALE")
            if default_guidance_scale is not None:
                kwargs["guidance_scale"] = float(default_guidance_scale)
            # Else, don't specify anything, leave the default behaviour
        if "seed" in kwargs:
            seed = int(kwargs["seed"])
            generator = torch.Generator().manual_seed(seed)
            kwargs["generator"] = generator
            kwargs.pop("seed")

        # Run the diffusion pipeline; the "images" entry of its output is a
        # list of PIL images, and only the single generated image is returned.
        images = self.ldm(inputs, **kwargs)["images"]
        return images[0]
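
For illustration only, here is a minimal sketch of how this normalization plays out for a typical request. The FakePipeline class and the example payload are invented stand-ins; only the environment variable names (DEFAULT_NUM_INFERENCE_STEPS, DEFAULT_GUIDANCE_SCALE) and the seed-to-generator conversion mirror the code above.


    import os

    import torch


    class FakePipeline:
        """Illustrative stand-in for the diffusers pipeline held in self.ldm."""

        def __call__(self, prompt, **kwargs):
            generator = kwargs.get("generator")
            print(
                f"prompt={prompt!r} "
                f"steps={kwargs.get('num_inference_steps')} "
                f"scale={kwargs.get('guidance_scale')} "
                f"seeded={isinstance(generator, torch.Generator)}"
            )
            return {"images": ["<PIL.Image.Image>"]}


    # Server-side defaults, normally set in the container environment.
    os.environ["DEFAULT_NUM_INFERENCE_STEPS"] = "25"
    os.environ["DEFAULT_GUIDANCE_SCALE"] = "7.5"

    # Emulate the normalization performed by _process_req for a request that
    # only pins a seed: steps and guidance scale fall back to the env defaults,
    # and the seed is turned into a torch.Generator.
    kwargs = {"seed": 42, "num_images_per_prompt": 1}
    kwargs.setdefault("num_inference_steps", int(os.environ["DEFAULT_NUM_INFERENCE_STEPS"]))
    kwargs.setdefault("guidance_scale", float(os.environ["DEFAULT_GUIDANCE_SCALE"]))
    kwargs["generator"] = torch.Generator().manual_seed(int(kwargs.pop("seed")))

    image = FakePipeline()("an astronaut riding a horse", **kwargs)["images"][0]

The caller-side defaults shown here are only a stand-in for the method above; in the actual pipeline the env-var fallbacks apply solely when the request omits num_inference_steps or guidance_scale.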