# generate.py

import html
from pathlib import Path as SysPath
from uuid import uuid4

import torch
from diffusers import StableDiffusionPipeline
from fastapi import Form, APIRouter
from fastapi.responses import HTMLResponse
from peft import PeftModel  # used to load LoRA adapters

# Router mounted by the application; exposes the /generate/ endpoint below.
router = APIRouter()

# Directory where generated images are written; created at import time if
# missing (exist_ok keeps repeated imports idempotent).
GENERATED_DIR = SysPath("./generated")
GENERATED_DIR.mkdir(exist_ok=True)
# Process-wide cache of loaded pipelines, keyed by base-model id, so the
# heavyweight StableDiffusionPipeline is only loaded once per process.
pipe_cache = {}

@router.post("/generate/", response_class=HTMLResponse)
async def generate_image(concept: str = Form(...), lora: str = Form(None)):
    """Generate an image from *concept* with Stable Diffusion and return an
    HTML page embedding the result.

    Args:
        concept: Text prompt for the diffusion model (form field).
        lora: Optional name of a LoRA adapter directory under ``./models``.

    Returns:
        HTML for a success page with the generated image, or an error
        message when the requested LoRA directory does not exist.

    NOTE(review): the heavy synchronous inference below blocks the event
    loop inside an ``async def``; consider ``run_in_executor`` — TODO.
    """
    image_path = GENERATED_DIR / f"{uuid4().hex}.png"
    base_model_id = "runwayml/stable-diffusion-v1-5"

    device = "mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu"

    # Load the pipeline once per process and reuse it (loading is expensive).
    if base_model_id not in pipe_cache:
        pipe = StableDiffusionPipeline.from_pretrained(
            base_model_id,
            torch_dtype=torch.float32,
        )
        pipe.to(device)
        # Remember the pristine UNet: the pipe object is shared via the
        # cache, so per-request adapter wrapping must not mutate it for good.
        pipe._base_unet = pipe.unet
        pipe_cache[base_model_id] = pipe
    else:
        pipe = pipe_cache[base_model_id]

    # BUGFIX: always start from the base UNet. The previous code wrapped the
    # cached pipe's UNet in place, so adapters stacked across requests and a
    # no-LoRA request after a LoRA request still ran with the old adapter.
    pipe.unet = pipe._base_unet

    # Load the requested LoRA adapter (PeftModel wraps the base UNet).
    if lora:
        lora_path = SysPath("./models") / lora
        if not lora_path.exists():
            # Escape: `lora` is user-controlled and reflected into HTML.
            return f"<p>❌ LoRA 不存在: {html.escape(str(lora_path))}</p>"

        pipe.unet = PeftModel.from_pretrained(pipe._base_unet, str(lora_path))

    # Inference. autocast takes the device type directly; the original
    # `device if device != "cpu" else "cpu"` was a no-op around `device`.
    with torch.autocast(device):
        image = pipe(concept, num_inference_steps=25, guidance_scale=7.5).images[0]

    image.save(image_path)

    # Escape the user-controlled fields to prevent reflected XSS.
    safe_concept = html.escape(concept)
    safe_lora = html.escape(lora) if lora else "无"
    return f"""
        <html>
        <body>
            <h2>✅ 生成结果</h2>
            <p>关键词: {safe_concept}</p>
            <p>使用 LoRA: {safe_lora}</p>
            <img src="/generated/{image_path.name}" width="512"/>
            <br><a href="/">返回主页</a>
        </body>
        </html>
    """
