in src/huggingface_inference_toolkit/diffusers_utils.py [0:0]
def __init__(self, model_dir: str, device: Union[str, None] = None, **kwargs):  # needs "cuda" for GPU
    """Load a text-to-image diffusion pipeline from *model_dir*.

    On CUDA, prefers bfloat16 when the GPU supports it (falling back to
    float16) and spreads the model with a "balanced" device map; on any
    other device the pipeline is loaded in float32 with no device map.
    Extra keyword arguments are forwarded to ``from_pretrained``.
    """
    on_cuda = device == "cuda"
    if on_cuda:
        torch_dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float16
    else:
        torch_dtype = torch.float32
    self.pipeline = AutoPipelineForText2Image.from_pretrained(
        model_dir,
        torch_dtype=torch_dtype,
        device_map="balanced" if on_cuda else None,
        **kwargs,
    )
    # Best effort: swap in DPMSolverMultistepScheduler for plain Stable
    # Diffusion pipelines; keep the default scheduler if that fails.
    if isinstance(self.pipeline, StableDiffusionPipeline):
        try:
            self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config)
        except Exception:
            pass