def __call__()

in docker_images/diffusers/app/pipelines/text_to_image.py [0:0]


    def __call__(self, inputs: str, **kwargs) -> "Image.Image":
        """
        Run text-to-image generation for the given prompt.

        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`PIL.Image.Image` with the raw image representation as PIL.
        """
        # Users may request a specific scheduler by class name; pop it so it is
        # not forwarded to the underlying pipeline call.
        custom_scheduler = kwargs.pop("scheduler", None)

        if custom_scheduler:
            # Only swap in schedulers the current pipeline declares compatible.
            # `compatibles` already holds the class objects, so there is no need
            # to re-import the class dynamically — use the match directly.
            matched_cls = next(
                (
                    cls
                    for cls in self.ldm.scheduler.compatibles
                    if cls.__name__ == custom_scheduler
                ),
                None,
            )
            if matched_cls is not None:
                # Swap schedulers, carrying over the current scheduler's config.
                self.ldm.scheduler = matched_cls.from_config(
                    self.ldm.scheduler.config
                )
            else:
                logger.info("%s scheduler not loaded: incompatible", custom_scheduler)
                self.ldm.scheduler = self.default_scheduler
        else:
            # No custom scheduler requested: restore the default.
            self.ldm.scheduler = self.default_scheduler

        self._load_lora_adapter(kwargs)

        # With idle-unloading enabled, register a request witness (so the idle
        # watcher knows the model is in use) and move the model onto the GPU
        # for the duration of this request.
        if idle.UNLOAD_IDLE:
            with idle.request_witnesses():
                self._model_to_gpu()
                resp = self._process_req(inputs, **kwargs)
        else:
            resp = self._process_req(inputs, **kwargs)
        return resp