# inference.py
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

# Pick the compute device first so the load-time dtype can match it.
# Half precision is supported (and much lighter) on both CUDA and MPS;
# CPU inference requires full float32.
if torch.backends.mps.is_available():
    device = torch.device("mps")
elif torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# NOTE(review): the original loaded in float32 on MPS because the dtype
# check only looked at CUDA; diffusers recommends fp16 on Apple Silicon.
dtype = torch.float16 if device.type in ("cuda", "mps") else torch.float32

print("Loading Stable Diffusion model...")
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2",
    torch_dtype=dtype,
)
# Swap in the DPM-Solver++ multistep scheduler: comparable quality in
# far fewer denoising steps than the pipeline's default scheduler.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

pipe = pipe.to(device)
print(f"Model loaded on {device}")
