#!/usr/bin/env python3
"""
Stable Video Diffusion 离线推理服务
完全不依赖网络，直接从本地加载模型
"""

import os
import io
import time
import uuid
import logging
import yaml
import torch
from pathlib import Path
from typing import Optional

from PIL import Image
from fastapi import FastAPI, File, Form, UploadFile, HTTPException
from fastapi.responses import FileResponse, JSONResponse
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video

# Application initialization
app = FastAPI(title="Stable Video Diffusion Offline API")

# Module-level globals: populated lazily at startup.
# `config` holds the parsed YAML config; `pipe` holds the loaded SVD pipeline
# (None until load_model_offline() succeeds — health checks key off this).
config = None
pipe = None

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("svd_offline")


def load_config(config_path='config/config.yaml'):
    """Parse the YAML config file and store it in the module-level ``config``.

    Args:
        config_path: Filesystem path of the YAML configuration file.

    Returns:
        The parsed configuration mapping.

    Raises:
        RuntimeError: If the configuration file does not exist.
    """
    global config
    try:
        handle = open(config_path, 'r', encoding='utf-8')
    except FileNotFoundError:
        logger.error(f"配置文件未找到: {config_path}")
        raise RuntimeError(f"配置文件未找到: {config_path}")
    with handle:
        config = yaml.safe_load(handle)
    return config


def load_model_offline():
    """Load the SVD pipeline strictly from a local directory (no network).

    Reads ``config['model']`` (loading the config first if necessary),
    resolves the local model directory, constructs the
    ``StableVideoDiffusionPipeline`` from it, moves it to the configured
    device and optionally enables xFormers memory-efficient attention.

    Returns:
        True on success.

    Raises:
        RuntimeError: If the model directory is missing or loading fails.
    """
    global pipe, config

    if not config:
        config = load_config()

    model_cfg = config['model']

    # Resolve the plain local path; diffusers loads directly from it with
    # no cache_dir / network lookup involved.
    local_model_path = Path(model_cfg['cache_dir']) / "svd_model"
    logger.info(f"从本地路径加载模型: {local_model_path}")

    # Fail fast with guidance if the model files are not prepared yet.
    if not local_model_path.exists():
        logger.error(f"模型路径不存在: {local_model_path}")
        logger.error("请先运行 python offline_model_manager.py 来准备模型文件")
        raise RuntimeError(f"模型路径不存在: {local_model_path}")

    # Map the config's variant string onto a torch dtype (fp16 fallback).
    variant = model_cfg.get('variant', 'fp16')
    torch_dtype = {
        'fp16': torch.float16,
        'bf16': torch.bfloat16,
        'fp32': torch.float32,
    }.get(variant, torch.float16)

    try:
        # Passing a concrete path string (and no cache_dir /
        # local_files_only) makes diffusers load from disk only, never
        # attempting a network connection.
        pipe = StableVideoDiffusionPipeline.from_pretrained(
            str(local_model_path),
            torch_dtype=torch_dtype,
            variant=variant,
            use_safetensors=model_cfg.get('use_safetensors', True)
        )

        # Place the pipeline on the configured device (auto-detect CUDA by
        # default).
        device = model_cfg.get('device', 'cuda' if torch.cuda.is_available() else 'cpu')
        pipe.to(device)

        # Optional memory optimization; failure here is non-fatal.
        if model_cfg.get('enable_xformers', False):
            try:
                pipe.enable_xformers_memory_efficient_attention()
                logger.info("xFormers 内存优化已启用")
            except Exception as e:
                logger.warning(f"xFormers 启用失败: {e}")

        logger.info("✅ 模型加载成功！")
        logger.info(f"  设备: {device}")
        logger.info(f"  数据类型: {torch_dtype}")

        return True

    except Exception as e:
        logger.error(f"模型加载失败: {e}")
        logger.error("可能的原因：")
        logger.error("1. 模型文件不完整")
        logger.error("2. 模型文件版本不兼容")
        logger.error("3. GPU 内存不足")
        logger.error("请运行 python offline_model_manager.py 检查模型文件")
        raise RuntimeError(f"模型加载失败: {e}")


# FastAPI 事件
# FastAPI events
# NOTE(review): on_event("startup") is deprecated in recent FastAPI in
# favor of lifespan handlers — consider migrating when upgrading.
@app.on_event("startup")
async def startup_event():
    """Run once at application startup: load config, create working
    directories, and attempt to load the model.

    Model-load failures are logged but not re-raised so the service stays
    up and its state remains observable through the /health endpoint.
    """
    global config

    logger.info("="*50)
    logger.info("启动 SVD 离线服务")
    logger.info("="*50)

    # Load configuration
    config = load_config()

    # Create required directories. parents=True so that nested paths like
    # "./data/uploads" do not crash startup when the parent is missing
    # (the original exist_ok-only call raised FileNotFoundError there).
    Path(config['paths']['upload_dir']).mkdir(parents=True, exist_ok=True)
    Path(config['paths']['output_dir']).mkdir(parents=True, exist_ok=True)
    Path(config['paths'].get('temp_dir', './temp')).mkdir(parents=True, exist_ok=True)

    # Load the model; keep the service running on failure so the health
    # check can report "model_not_loaded" instead of the process dying.
    try:
        load_model_offline()
    except Exception as e:
        logger.error(f"启动失败: {e}")


# API Endpoints
# API Endpoints
@app.get("/health", tags=["Status"])
async def health_check():
    """Health-check endpoint.

    Reports whether the model pipeline is loaded, CUDA availability, and
    basic GPU memory statistics (for device 0) when a GPU is present.
    """
    gpu_available = torch.cuda.is_available()

    if gpu_available:
        gpu_info = {
            "device_name": torch.cuda.get_device_name(0),
            "memory_allocated": f"{torch.cuda.memory_allocated(0) / 1e9:.2f} GB",
            "memory_reserved": f"{torch.cuda.memory_reserved(0) / 1e9:.2f} GB",
            "memory_total": f"{torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB",
        }
    else:
        gpu_info = {}

    model_loaded = pipe is not None
    payload = {
        "status": "healthy" if model_loaded else "model_not_loaded",
        "model_loaded": model_loaded,
        "gpu_available": gpu_available,
        "gpu_info": gpu_info,
        "mode": "offline",
    }
    return JSONResponse(content=payload)


@app.get("/model_info", tags=["Status"])
async def model_info():
    """Return information about the local model directory and pipeline.

    Responds 503 with a hint when the model has not been loaded yet.
    """
    if not pipe:
        return JSONResponse(content={
            "error": "模型未加载",
            "suggestion": "请运行 python offline_model_manager.py 准备模型"
        }, status_code=503)

    model_path = Path(config['model']['cache_dir']) / "svd_model"

    return JSONResponse(content={
        "model_path": str(model_path),
        "model_exists": model_path.exists(),
        "device": str(pipe.device) if hasattr(pipe, 'device') else "unknown",
        # Convert Path entries to strings: raw Path objects are not JSON
        # serializable and made this endpoint raise a TypeError.
        "components": [str(p) for p in model_path.iterdir()] if model_path.exists() else []
    })


@app.post("/generate", tags=["Video Generation"])
async def generate_video(
    image: UploadFile = File(..., description="输入图片 (JPEG/PNG)"),
    num_frames: Optional[int] = Form(None, description="生成帧数 (14 or 25)"),
    fps: Optional[int] = Form(None, description="视频帧率"),
    motion_bucket_id: Optional[int] = Form(None, description="运动强度 (0-255)"),
    noise_aug_strength: Optional[float] = Form(None, description="噪声增强 (0-1)"),
    seed: Optional[int] = Form(None, description="随机种子")
):
    """Generate a video from an uploaded image via the SVD pipeline.

    Form parameters override the corresponding ``config['inference']``
    defaults when provided; the resulting MP4 is saved under the configured
    output directory and streamed back to the caller.

    Raises:
        HTTPException: 503 if the model is not loaded, 400 for an invalid
            image, 500 on inference or export failure.
    """
    if not pipe:
        raise HTTPException(
            status_code=503, 
            detail="模型未加载。请先运行 python offline_model_manager.py 准备模型文件"
        )

    # Merge request parameters with config defaults. Use explicit
    # `is not None` checks instead of `or`: legitimate falsy inputs such as
    # motion_bucket_id=0, noise_aug_strength=0.0 or seed=0 were previously
    # silently replaced by the config defaults.
    inf_cfg = config['inference']
    params = {
        "num_frames": num_frames if num_frames is not None else inf_cfg['num_frames'],
        "fps": fps if fps is not None else inf_cfg['fps'],
        "motion_bucket_id": motion_bucket_id if motion_bucket_id is not None else inf_cfg['motion_bucket_id'],
        "noise_aug_strength": noise_aug_strength if noise_aug_strength is not None else inf_cfg['noise_aug_strength'],
        "decode_chunk_size": inf_cfg['decode_chunk_size'],
        "seed": seed if seed is not None else inf_cfg.get('seed', -1)
    }

    # Decode and resize the uploaded image to the configured resolution.
    try:
        contents = await image.read()
        input_image = Image.open(io.BytesIO(contents)).convert("RGB")
        input_image = input_image.resize((inf_cfg['width'], inf_cfg['height']))
    except Exception as e:
        logger.error(f"图片处理失败: {e}")
        raise HTTPException(status_code=400, detail=f"无效的图片文件: {e}")

    # Inference
    start_time = time.time()

    # Seed the RNG only when a concrete seed was requested (-1 = random).
    if params['seed'] != -1:
        generator = torch.manual_seed(params['seed'])
    else:
        generator = None

    logger.info(f"开始生成视频, 参数: {params}")

    try:
        frames = pipe(
            input_image,
            num_frames=params['num_frames'],
            decode_chunk_size=params['decode_chunk_size'],
            generator=generator,
            motion_bucket_id=params['motion_bucket_id'],
            noise_aug_strength=params['noise_aug_strength'],
            num_inference_steps=inf_cfg.get('num_inference_steps', 25)
        ).frames[0]
    except Exception as e:
        logger.error(f"推理失败: {e}")
        raise HTTPException(status_code=500, detail=f"视频生成失败: {e}")

    elapsed_time = time.time() - start_time
    logger.info(f"视频生成完成, 耗时: {elapsed_time:.2f}s")

    # Export frames to an MP4 with a collision-free random filename.
    output_dir = Path(config['paths']['output_dir'])
    video_filename = f"{uuid.uuid4()}.mp4"
    video_path = output_dir / video_filename

    try:
        export_to_video(frames, str(video_path), fps=params['fps'])
        logger.info(f"视频已保存到: {video_path}")
    except Exception as e:
        logger.error(f"视频导出失败: {e}")
        raise HTTPException(status_code=500, detail=f"视频文件保存失败: {e}")

    return FileResponse(
        video_path, 
        media_type="video/mp4", 
        filename=video_filename,
        headers={"X-Processing-Time": f"{elapsed_time:.2f}s"}
    )


# Main
# Main entry point
if __name__ == "__main__":
    import sys  # fix: sys.exit() below previously raised NameError (sys was never imported)
    import uvicorn

    # Verify the model files exist before starting the server.
    from offline_model_manager import OfflineModelManager
    manager = OfflineModelManager()

    if not manager.check_model_exists():
        logger.error("模型文件不存在，请先准备模型")
        manager.provide_download_instructions()
        sys.exit(1)

    # Start the service (single worker only — the pipeline lives in-process).
    server_cfg = load_config()['server']
    uvicorn.run(
        "svd_server_offline:app",
        host=server_cfg['host'],
        port=server_cfg['port'],
        workers=1,
        reload=server_cfg.get('reload', False)
    )
