#!/usr/bin/env python3
"""
Stable Video Diffusion 推理服务 (FastAPI)

提供 REST API 接口，用于根据输入图片生成视频。
"""

import io
import logging
import logging.handlers
import os
import time
import uuid
from pathlib import Path
from typing import Optional

import torch
import yaml
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
from fastapi import FastAPI, File, Form, UploadFile, HTTPException
from fastapi.responses import FileResponse, JSONResponse
from PIL import Image

# --- 应用初始化 ---
app = FastAPI(title="Stable Video Diffusion API")

# --- 全局变量 ---
config = None
pipe = None

# --- 日志配置 ---
logger = logging.getLogger("svd_server")

# --- 辅助函数 ---

def load_config(config_path='config/config.yaml'):
    """Load the YAML config file into the module-level ``config`` global.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        The parsed configuration dictionary.

    Raises:
        RuntimeError: If the configuration file does not exist.
    """
    global config
    try:
        fh = open(config_path, 'r', encoding='utf-8')
    except FileNotFoundError:
        raise RuntimeError(f"配置文件未找到: {config_path}")
    with fh:
        config = yaml.safe_load(fh)
    return config

def setup_logging():
    """Configure the module logger with a size-rotating file handler.

    Reads the ``logging`` section of the global ``config`` (level, log
    directory, rotation size/count, message format) and attaches a
    RotatingFileHandler writing to ``<log_dir>/svd_server.log``.
    """
    log_config = config.get('logging', {})
    level = log_config.get('level', 'INFO').upper()
    log_dir = Path(log_config.get('log_dir', './logs'))
    # parents=True: a nested log_dir (e.g. "var/log/svd") must not fail on
    # missing intermediate directories.
    log_dir.mkdir(parents=True, exist_ok=True)

    # NOTE: needs `import logging.handlers` at the top of the file — a bare
    # `import logging` does not guarantee the handlers submodule is loaded.
    handler = logging.handlers.RotatingFileHandler(
        log_dir / "svd_server.log",
        maxBytes=log_config.get('max_file_size', 10 * 1024 * 1024),
        backupCount=log_config.get('backup_count', 5)
    )
    formatter = logging.Formatter(
        log_config.get('format', '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)


def load_model():
    """Load the Stable Video Diffusion pipeline onto the configured device.

    Populates the global ``pipe`` from the local model cache only
    (``local_files_only=True`` — no network access) and optionally enables
    the xFormers memory-efficient attention backend.

    Returns:
        True on success.

    Raises:
        RuntimeError: If the pipeline cannot be loaded.
    """
    global pipe, config
    if not config:
        config = load_config()

    model_cfg = config['model']
    logger.info(f"正在从 {model_cfg['name']} 加载模型 (缓存目录: {model_cfg['cache_dir']})...")

    variant = model_cfg.get('variant', 'fp16')
    # Map the configured variant string onto a torch dtype; anything
    # unrecognized falls back to half precision.
    dtype = {
        'fp16': torch.float16,
        'bf16': torch.bfloat16,
        'fp32': torch.float32,
    }.get(variant, torch.float16)

    try:
        # Load from the pretrained cache using repo id + cache_dir directly;
        # diffusers locates the correct model files inside cache_dir itself.
        pipe = StableVideoDiffusionPipeline.from_pretrained(
            model_cfg['name'],
            torch_dtype=dtype,
            variant=variant,
            use_safetensors=model_cfg.get('use_safetensors', True),
            cache_dir=model_cfg['cache_dir'],
            local_files_only=True
        )
        pipe.to(model_cfg['device'])

        if model_cfg.get('enable_xformers', False):
            # Best-effort: a failed xFormers setup is logged, not fatal.
            try:
                pipe.enable_xformers_memory_efficient_attention()
                logger.info("xFormers 内存优化已启用")
            except Exception as e:
                logger.warning(f"xFormers 启用失败: {e}")

        logger.info("模型加载成功！")
        return True
    except OSError as e:
        # OSError here usually means missing/corrupted cached model files.
        logger.error(f"模型加载失败: {e}")
        for hint in (
            "这是一个严重错误，通常意味着模型文件未能成功下载或缓存已损坏。",
            "请尝试以下步骤解决：",
            "1. 仔细检查 `download_models.py` 脚本的输出，确保没有网络错误或权限问题。",
            f"2. 确认模型缓存目录 '{model_cfg['cache_dir']}' 是否存在且其中包含模型文件。",
            "3. 作为最后的手段，您可以尝试删除缓存目录并重新运行部署脚本，以强制重新下载所有模型。",
        ):
            logger.error(hint)
        raise RuntimeError(f"模型加载失败，原因：{e}。请检查下载脚本日志和缓存目录。")
    except Exception as e:
        logger.error(f"发生未知错误导致模型加载失败: {e}")
        raise RuntimeError(f"模型加载失败: {e}")

# --- FastAPI 事件 ---

@app.on_event("startup")
async def startup_event():
    """Run once at application startup.

    Loads the config, wires up logging, loads the SVD model, and ensures
    the upload/output directories exist.
    """
    global config
    config = load_config()
    setup_logging()
    load_model()

    # parents=True: do not fail when a configured path is nested (e.g.
    # "data/uploads") and intermediate directories are missing.
    Path(config['paths']['upload_dir']).mkdir(parents=True, exist_ok=True)
    Path(config['paths']['output_dir']).mkdir(parents=True, exist_ok=True)


# --- API Endpoints ---

@app.get("/health", tags=["Status"])
async def health_check():
    """Report service liveness, model readiness, and basic GPU stats."""
    gpu_info = {}
    gpu_available = torch.cuda.is_available()
    if gpu_available:
        allocated_gb = torch.cuda.memory_allocated(0) / 1e9
        reserved_gb = torch.cuda.memory_reserved(0) / 1e9
        gpu_info = {
            "device_name": torch.cuda.get_device_name(0),
            "memory_allocated": f"{allocated_gb:.2f} GB",
            "memory_reserved": f"{reserved_gb:.2f} GB",
        }

    payload = {
        "status": "healthy",
        "model_loaded": pipe is not None,
        "gpu_available": gpu_available,
        "gpu_info": gpu_info,
    }
    return JSONResponse(content=payload)


@app.post("/generate", tags=["Video Generation"])
async def generate_video(
    image: UploadFile = File(..., description="输入图片 (JPEG/PNG)"),
    num_frames: Optional[int] = Form(None, description="生成帧数 (14 or 25)"),
    fps: Optional[int] = Form(None, description="视频帧率"),
    motion_bucket_id: Optional[int] = Form(None, description="运动强度 (0-255)"),
    noise_aug_strength: Optional[float] = Form(None, description="噪声增强 (0-1)"),
    seed: Optional[int] = Form(None, description="随机种子")
):
    """Generate a short video from an input image via Stable Video Diffusion.

    Form parameters override the defaults from the ``inference`` config
    section; omitted (None) parameters fall back to the config values.
    The input image is resized to the configured width/height.

    Returns:
        FileResponse streaming the generated MP4.

    Raises:
        HTTPException: 503 if the model is not loaded, 400 for an invalid
            image, 500 on inference or export failure.
    """
    if not pipe:
        raise HTTPException(status_code=503, detail="模型正在加载或加载失败，请稍后重试")

    # --- 1. Parameter resolution ---
    inf_cfg = config['inference']
    # Use explicit None checks: `x or default` would silently discard valid
    # falsy overrides such as motion_bucket_id=0, noise_aug_strength=0.0,
    # or seed=0 (all within the documented ranges).
    params = {
        "num_frames": num_frames if num_frames is not None else inf_cfg['num_frames'],
        "fps": fps if fps is not None else inf_cfg['fps'],
        "motion_bucket_id": motion_bucket_id if motion_bucket_id is not None else inf_cfg['motion_bucket_id'],
        "noise_aug_strength": noise_aug_strength if noise_aug_strength is not None else inf_cfg['noise_aug_strength'],
        "decode_chunk_size": inf_cfg['decode_chunk_size'],
        "seed": seed if seed is not None else inf_cfg['seed']
    }

    # --- 2. Image decoding ---
    try:
        contents = await image.read()
        input_image = Image.open(io.BytesIO(contents)).convert("RGB")
        input_image = input_image.resize((inf_cfg['width'], inf_cfg['height']))
    except Exception as e:
        logger.error(f"图片处理失败: {e}")
        raise HTTPException(status_code=400, detail=f"无效的图片文件: {e}")

    # --- 3. Inference ---
    start_time = time.time()

    # seed == -1 means "non-deterministic": run without a fixed generator.
    generator = torch.manual_seed(params['seed']) if params['seed'] != -1 else None

    logger.info(f"开始生成视频, 参数: {params}")

    try:
        frames = pipe(
            input_image,
            num_frames=params['num_frames'],
            decode_chunk_size=params['decode_chunk_size'],
            generator=generator,
            motion_bucket_id=params['motion_bucket_id'],
            noise_aug_strength=params['noise_aug_strength'],
            num_inference_steps=inf_cfg['num_inference_steps']
        ).frames[0]
    except Exception as e:
        logger.error(f"推理失败: {e}")
        raise HTTPException(status_code=500, detail=f"视频生成失败: {e}")

    elapsed_time = time.time() - start_time
    logger.info(f"视频生成完成, 耗时: {elapsed_time:.2f}s")

    # --- 4. Video export ---
    output_dir = Path(config['paths']['output_dir'])
    video_filename = f"{uuid.uuid4()}.mp4"
    video_path = output_dir / video_filename

    try:
        export_to_video(frames, str(video_path), fps=params['fps'])
        logger.info(f"视频已保存到: {video_path}")
    except Exception as e:
        logger.error(f"视频导出失败: {e}")
        raise HTTPException(status_code=500, detail=f"视频文件保存失败: {e}")

    return FileResponse(video_path, media_type="video/mp4", filename=video_filename)


# --- Main ---

if __name__ == "__main__":
    import uvicorn

    # Launch the ASGI app with uvicorn using the `server` config section.
    cfg = load_config()['server']
    uvicorn.run(
        "svd_server:app",
        host=cfg['host'],
        port=cfg['port'],
        workers=cfg.get('workers', 1),
        reload=cfg.get('reload', False)
    )
