from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, FileResponse
import uvicorn
import torch
from PIL import Image
import io
import os
from datetime import datetime
import uuid
from modelscope import snapshot_download
from diffusers import (
    StableDiffusionPipeline,
    DPMSolverMultistepScheduler,
    UniPCMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    DDIMScheduler,
    HeunDiscreteScheduler,
    DPMSolverSinglestepScheduler,
    KDPM2DiscreteScheduler,
    KDPM2AncestralDiscreteScheduler,
    PNDMScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
)

app = FastAPI()

# Configure CORS so any frontend origin can call this API.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True per the CORS spec — confirm whether credentialed
# requests are actually needed, otherwise drop one of the two.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create working directories (idempotent thanks to exist_ok=True).
OUTPUT_DIR = "outputs"       # generated images are written here and served from /images
MODELS_CACHE_DIR = "models"  # downloaded model snapshots are cached here
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(MODELS_CACHE_DIR, exist_ok=True)

# Registry of selectable models. Keys are the short ids used by the /models
# endpoints; each entry carries the ModelScope repo id plus type-specific
# metadata ("base_model"/"weight" for LoRAs, "compatible_models" for VAEs).
# The "description" strings are user-facing and returned verbatim by the API.
AVAILABLE_MODELS = {
    # Checkpoint models (full pipelines)
    "sd-1.5": {
        "name": "Stable Diffusion 1.5",
        "model_id": "AI-ModelScope/stable-diffusion-v1-5",
        "description": "基础版本，适合通用场景",
        "type": "checkpoint"
    },
    "sd-2.1": {
        "name": "Stable Diffusion 2.1",
        "model_id": "AI-ModelScope/stable-diffusion-v2-1",
        "description": "改进版本，提供更好的细节",
        "type": "checkpoint"
    },
    "sdxl": {
        "name": "Stable Diffusion XL",
        "model_id": "AI-ModelScope/stable-diffusion-xl-base-1.0",
        "description": "高质量版本，支持更大分辨率",
        "type": "checkpoint"
    },
    # LoRA adapters (fused onto their "base_model" checkpoint at load time)
    "anime-lora": {
        "name": "动漫风格 LoRA",
        "model_id": "sd_lora/SD15-LoRA-AnimeLineartStyle",
        "description": "适合生成动漫风格图像",
        "type": "lora",
        "base_model": "sd-1.5",
        "weight": 0.7  # fuse scale applied via fuse_lora
    },
    "realistic-lora": {
        "name": "写实风格 LoRA",
        "model_id": "mushenL/3D-CG-Style-Realistic",
        "description": "增强真实感和细节表现",
        "type": "lora",
        "base_model": "sd-1.5",
        "weight": 0.8
    },
    # Standalone VAEs (swapped into an already-loaded checkpoint)
    "anime-vae": {
        "name": "动漫 VAE",
        "model_id": "digiplay/YabaLMixAnimeRealistic_V1.0_VAE",
        "description": "优化动漫图像的色彩和细节",
        "type": "vae",
        # NOTE(review): "compatible_models" is declared but never enforced by
        # load_model — confirm whether validation was intended.
        "compatible_models": ["sd-1.5", "sd-2.1"]
    },
    "realistic-vae": {
        "name": "写实 VAE",
        "model_id": "AI-ModelScope/Realistic_Vision_V5.1_noVAE",
        "description": "优化真实图像的质感和光影",
        "type": "vae",
        "compatible_models": ["sd-1.5", "sd-2.1", "sdxl"]
    }
}

# Map of scheduler ids (as accepted by /generate and listed by /schedulers)
# to their diffusers scheduler classes.
SCHEDULER_MAP = {
    "DPMSolverMultistep": DPMSolverMultistepScheduler,
    "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
    "UniPCMultistep": UniPCMultistepScheduler,
    "EulerA": EulerAncestralDiscreteScheduler,
    "Euler": EulerDiscreteScheduler,
    "DDIM": DDIMScheduler,
    "Heun": HeunDiscreteScheduler,
    "KDPM2": KDPM2DiscreteScheduler,
    "KDPM2A": KDPM2AncestralDiscreteScheduler,
    "PNDM": PNDMScheduler,
    "LMS": LMSDiscreteScheduler,
}

# In-process cache: model id -> loaded pipeline (pipelines are large; load once).
model_cache = {}
# Id of the model used by /generate; changed by POST /models/{model_id}.
current_model_id = "sd-1.5"

def load_model(model_id: str):
    """Download (if needed) and load the pipeline for *model_id*, with caching.

    Args:
        model_id: A key of AVAILABLE_MODELS.

    Returns:
        The loaded diffusers pipeline (checkpoint pipeline; for "lora" entries
        the base pipeline with the LoRA fused in; for "vae" entries the current
        base pipeline with its VAE replaced).

    Raises:
        HTTPException: 400 for unknown ids or a VAE requested before any base
            model is loaded; 500 when downloading/loading fails.
    """
    if model_id not in AVAILABLE_MODELS:
        raise HTTPException(status_code=400, detail="Invalid model ID")

    if model_id not in model_cache:
        model_info = AVAILABLE_MODELS[model_id]
        # Use half precision on GPU, full precision on CPU.
        dtype = torch.float16 if torch.cuda.is_available() else torch.float32
        try:
            # Download (or reuse the cached) model snapshot from ModelScope.
            model_dir = snapshot_download(model_info["model_id"], cache_dir=MODELS_CACHE_DIR)

            if model_info["type"] == "checkpoint":
                pipe = StableDiffusionPipeline.from_pretrained(
                    model_dir,
                    torch_dtype=dtype,
                    use_safetensors=True,
                    safety_checker=None,  # disable the NSFW checker
                )
            elif model_info["type"] == "lora":
                # A LoRA needs its base checkpoint loaded first.
                base_model_info = AVAILABLE_MODELS[model_info["base_model"]]
                base_model_dir = snapshot_download(base_model_info["model_id"], cache_dir=MODELS_CACHE_DIR)
                pipe = StableDiffusionPipeline.from_pretrained(
                    base_model_dir,
                    torch_dtype=dtype,
                    use_safetensors=True,
                    safety_checker=None,
                )
                # Fuse the LoRA into the base weights. Pass the scale by keyword:
                # newer diffusers versions put other positional parameters before
                # lora_scale, so a positional call silently misassigns it.
                pipe.load_lora_weights(model_dir)
                pipe.fuse_lora(lora_scale=model_info["weight"])
            elif model_info["type"] == "vae":
                # A VAE only replaces the autoencoder of an already-loaded pipeline.
                if current_model_id not in model_cache:
                    raise HTTPException(status_code=400, detail="Please load a base model first")
                pipe = model_cache[current_model_id]
                # Load the new VAE with the pipeline's dtype; the original code
                # loaded it in float32, which breaks fp16 pipelines on CUDA.
                pipe.vae = type(pipe.vae).from_pretrained(model_dir, torch_dtype=dtype)
            else:
                raise HTTPException(status_code=500,
                                    detail=f"Failed to load model: unknown type {model_info['type']!r}")

            if torch.cuda.is_available():
                pipe = pipe.to("cuda")

            model_cache[model_id] = pipe
        except HTTPException:
            # Preserve our own HTTP errors (e.g. the 400 above) instead of
            # re-wrapping them as opaque 500s.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Failed to load model: {str(e)}")

    return model_cache[model_id]

# List the available models, grouped by type.
@app.get("/models")
async def get_models():
    """Return the model catalogue grouped by type plus the active model id."""
    grouped_models = {kind: [] for kind in ("checkpoint", "lora", "vae")}

    for mid, info in AVAILABLE_MODELS.items():
        bucket = grouped_models.get(info["type"])
        # Entries with an unrecognised type are silently skipped.
        if bucket is not None:
            bucket.append({"id": mid, **info})

    return JSONResponse({
        "models": grouped_models,
        "current_model": current_model_id
    })

# Switch the active model.
@app.post("/models/{model_id}")
async def switch_model(model_id: str):
    """Make *model_id* the active model, pre-loading it so the first
    /generate call does not pay the download/instantiation cost.

    Raises:
        HTTPException: 400 for unknown ids (here or from load_model);
            500 when loading fails.
    """
    global current_model_id
    if model_id not in AVAILABLE_MODELS:
        raise HTTPException(status_code=400, detail="Invalid model ID")

    try:
        # Pre-load (and cache) the pipeline.
        load_model(model_id)
    except HTTPException:
        # BUG FIX: the original caught these too and re-raised them as 500s,
        # losing the status code (e.g. load_model's 400).
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

    current_model_id = model_id
    return JSONResponse({
        "status": "success",
        "message": f"Switched to model: {AVAILABLE_MODELS[model_id]['name']}"
    })

@app.post("/generate")
async def generate_image(
    prompt: str = Form(...),
    negative_prompt: str = Form(None),
    num_inference_steps: int = Form(50),
    guidance_scale: float = Form(7.5),
    width: int = Form(512),
    height: int = Form(512),
    num_images: int = Form(1),
    scheduler: str = Form("DPMSolverMultistep"),
    seed: int = Form(-1),
    clip_skip: int = Form(1),
    image_format: str = Form("png"),
):
    """Generate images with the currently active model.

    Saves each image under OUTPUT_DIR with a UUID filename and returns their
    /images/... URLs together with the parameters used.

    Raises:
        HTTPException: 400/500 from load_model, 500 for any generation failure.
    """
    try:
        pipe = load_model(current_model_id)

        # Reproducibility: seed == -1 means "random". Use an explicit
        # torch.Generator (the documented diffusers mechanism) instead of
        # mutating the global RNG state.
        generator = None
        if seed != -1:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            generator = torch.Generator(device=device).manual_seed(seed)

        # Swap the sampler if a known one was requested; unknown names keep
        # the pipeline's current scheduler.
        if scheduler in SCHEDULER_MAP:
            pipe.scheduler = SCHEDULER_MAP[scheduler].from_config(pipe.scheduler.config)

        # NOTE(review): mutating num_hidden_layers is unlikely to implement
        # CLIP-skip correctly (diffusers expects a `clip_skip` call argument);
        # kept as-is for compatibility — confirm against the diffusers version.
        if hasattr(pipe, 'text_encoder') and clip_skip > 1:
            pipe.text_encoder.num_hidden_layers = clip_skip

        images = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            width=width,
            height=height,
            num_images_per_prompt=num_images,
            generator=generator,
        ).images

        # Persist every image under a collision-free name and collect its URL.
        image_urls = []
        for image in images:
            filename = f"{uuid.uuid4()}.{image_format}"
            filepath = os.path.join(OUTPUT_DIR, filename)
            # Only JPEG takes a quality setting; the original passed
            # quality=None for other formats.
            save_kwargs = {"quality": 95} if image_format == "jpeg" else {}
            image.save(filepath, format=image_format.upper(), **save_kwargs)
            # BUG FIX: the original appended the literal "/images/(unknown)"
            # instead of the saved file's URL.
            image_urls.append(f"/images/{filename}")

        return JSONResponse({
            "status": "success",
            "image_urls": image_urls,
            "parameters": {
                "model_id": current_model_id,
                "model_name": AVAILABLE_MODELS[current_model_id]["name"],
                "prompt": prompt,
                "negative_prompt": negative_prompt,
                "num_inference_steps": num_inference_steps,
                "guidance_scale": guidance_scale,
                "width": width,
                "height": height,
                "num_images": num_images,
                "scheduler": scheduler,
                "seed": seed,
                "clip_skip": clip_skip,
                "image_format": image_format,
            }
        })

    except HTTPException:
        # Preserve status codes raised by load_model instead of wrapping as 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/images/{image_name}")
async def get_image(image_name: str):
    """Serve a previously generated image from OUTPUT_DIR.

    Raises:
        HTTPException: 400 for names that would escape OUTPUT_DIR,
            404 when the file does not exist.
    """
    # SECURITY FIX: image_name comes straight from the URL. The original joined
    # it into OUTPUT_DIR unchecked, so "../..."-style names could read arbitrary
    # files (path traversal). Reject anything that is not a plain filename.
    if image_name in ("", ".", "..") or image_name != os.path.basename(image_name):
        raise HTTPException(status_code=400, detail="Invalid image name")
    image_path = os.path.join(OUTPUT_DIR, image_name)
    # isfile (not exists) so directories are also reported as not found.
    if not os.path.isfile(image_path):
        raise HTTPException(status_code=404, detail="Image not found")
    return FileResponse(image_path)

# List the supported samplers.
@app.get("/schedulers")
async def get_schedulers():
    """Return the supported scheduler ids with display names and descriptions."""
    # (id, display name, description) — ids match SCHEDULER_MAP keys.
    catalogue = [
        ("DPMSolverMultistep", "DPM++ 2M", "快速、高质量的采样器，推荐使用"),
        ("DPMSolverSinglestep", "DPM++ SDE", "单步DPM求解器，速度更快"),
        ("UniPCMultistep", "UniPC", "统一的预测器-校正器采样器"),
        ("EulerA", "Euler A", "欧拉祖先离散采样器，效果好"),
        ("Euler", "Euler", "基础欧拉离散采样器"),
        ("DDIM", "DDIM", "去噪扩散隐式模型，适合图像编辑"),
        ("Heun", "Heun", "高质量但较慢的采样器"),
        ("KDPM2", "KDPM2", "改进的DPM求解器"),
        ("KDPM2A", "KDPM2 A", "带祖先采样的KDPM2"),
        ("PNDM", "PNDM", "伪数值方法采样器"),
        ("LMS", "LMS", "线性多步采样器"),
    ]
    return JSONResponse({
        "schedulers": [
            {"id": sid, "name": name, "description": desc}
            for sid, name, desc in catalogue
        ]
    })

if __name__ == "__main__":
    # Development entry point: bind to all interfaces on port 8000 with
    # auto-reload. The "main:app" import string assumes this file is main.py.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True) 