"""
Z-Image-Turbo NPU REST API
基于 Tongyi-MAI/Z-Image-Turbo 模型
参考: https://huggingface.co/Tongyi-MAI/Z-Image-Turbo

支持: 文本到图像生成
适配: 华为昇腾 NPU
"""

import os
import sys
import time
import json
import uuid
import io
import base64
import gc
import threading
from pathlib import Path
from typing import List, Optional, Dict, Any, Union

import numpy as np
from PIL import Image

import torch

import uvicorn
from fastapi import FastAPI, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from contextlib import asynccontextmanager

from .model_loader import ZImageTurboModelLoader, ensure_npu_context
from .inference import ZImageTurboInference

# ========== NPU device configuration ==========
# Read the target Ascend device id from the environment (default: 0).
NPU_DEVICE_ID = int(os.getenv("ASCEND_DEVICE_ID", "0"))
NPU_NAME = f"npu:{NPU_DEVICE_ID}"
# NOTE(review): this flag is checked in NPUContextThread.run but is never
# assigned True anywhere in this module — confirm whether it should be set
# after a successful initialization, or whether per-thread set_device is
# intentional (Ascend contexts are per-thread).
_npu_initialized = False

# ========== Patch threading.Thread so new threads inherit the NPU context ==========
_OriginalThread = threading.Thread

class NPUContextThread(_OriginalThread):
    """
    threading.Thread subclass that sets the Ascend NPU device context
    before running the thread body.

    Note: Huawei Ascend NPUs must not be re-initialized; errors that
    indicate repeated initialization are deliberately ignored.
    """
    def run(self):
        global _npu_initialized
        # NOTE(review): _npu_initialized is read here but never set to True
        # in this module, so every new thread attempts set_device. Confirm
        # whether that is the intent (per-thread contexts) before changing.
        if not _npu_initialized:
            try:
                # torch_npu must be imported before torch.npu is usable.
                import torch_npu
                torch.npu.set_device(NPU_NAME)
            except Exception as e:
                # "Repeated initialization" / error code 100002 means the
                # context already exists; anything else is surfaced as a
                # warning — thread startup is never made fatal.
                if "Repeated initialization" not in str(e) and "100002" not in str(e):
                    print(f"[NPUContextThread] Warning: {e}")
        super().run()

# Globally patch threading.Thread so libraries that spawn worker threads
# (e.g. executors inside the inference stack) also get the NPU context.
threading.Thread = NPUContextThread

# ========== Lifespan events ==========
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan handler: load the model once at startup.

    There is currently nothing to clean up on shutdown.
    """
    load_model()  # heavy: loads model weights onto the NPU
    yield

# ========== FastAPI application ==========
app = FastAPI(
    title="Z-Image-Turbo NPU API",
    description="Tongyi-MAI Z-Image-Turbo 图像生成模型 API - 支持文本到图像生成",
    version="1.0.0",
    lifespan=lifespan
)

# ========== Global model state (populated by load_model) ==========
model_loader = None       # ZImageTurboModelLoader instance, or None if loading failed
inference_engine = None   # ZImageTurboInference instance, or None if loading failed
device = None             # device reported by the loader (e.g. "npu:0")

# ========== Request schemas ==========
class ImageGenerationRequest(BaseModel):
    """Parameters for a single text-to-image generation request."""
    prompt: str
    # Output resolution in pixels.
    height: Optional[int] = 1024
    width: Optional[int] = 1024
    # Number of diffusion steps (default 9).
    num_inference_steps: Optional[int] = 9
    # Classifier-free guidance scale; 0.0 disables guidance.
    guidance_scale: Optional[float] = 0.0
    # Random seed; None means non-deterministic output.
    seed: Optional[int] = None
    # When True, respond with a raw base64 field instead of a data URL.
    return_base64: Optional[bool] = False
    # Image encoding format passed to PIL (e.g. "PNG", "JPEG").
    image_format: Optional[str] = "PNG"

class BatchImageGenerationRequest(BaseModel):
    """Parameters for a batch text-to-image generation request."""
    # One image is generated per prompt, in order.
    prompts: List[str]
    # Shared output resolution in pixels for all prompts.
    height: Optional[int] = 1024
    width: Optional[int] = 1024
    # Number of diffusion steps (default 9).
    num_inference_steps: Optional[int] = 9
    # Classifier-free guidance scale; 0.0 disables guidance.
    guidance_scale: Optional[float] = 0.0
    # Optional per-prompt seeds, matched by index; missing entries mean None.
    seeds: Optional[List[int]] = None
    # When True, respond with raw base64 fields instead of data URLs.
    return_base64: Optional[bool] = False
    # Image encoding format passed to PIL (e.g. "PNG", "JPEG").
    image_format: Optional[str] = "PNG"


# -----------------------------------------------------------
# Model loading
# -----------------------------------------------------------
def load_model():
    """Initialize the global model loader and inference engine.

    Configuration is read from environment variables:
      - ENABLE_CPU_OFFLOAD: "true" to offload model parts to the CPU
      - COMPILE_MODEL:      "true" to compile the model
      - ATTENTION_BACKEND:  attention implementation name (default "sdpa")

    On any failure the globals are reset to None and the traceback is
    printed; the server still starts so /health can report the failure.
    """
    global model_loader, inference_engine, device

    def _flag(name):
        # Interpret an environment variable as a boolean switch.
        return os.getenv(name, "false").lower() == "true"

    try:
        model_loader = ZImageTurboModelLoader()
        model_loader.load_model(
            enable_cpu_offload=_flag("ENABLE_CPU_OFFLOAD"),
            compile_model=_flag("COMPILE_MODEL"),
            attention_backend=os.getenv("ATTENTION_BACKEND", "sdpa"),
        )

        device = model_loader.get_device()
        inference_engine = ZImageTurboInference(model_loader.get_pipeline(), device)

        print(f"[Init] Model loaded successfully on {device}", flush=True)
    except Exception as e:
        print(f"[Error] Could not load model: {e}", flush=True)
        import traceback
        traceback.print_exc()
        # Leave the service up but mark the model as unavailable.
        model_loader = None
        inference_engine = None


# -----------------------------------------------------------
# Image helpers
# -----------------------------------------------------------
def encode_image_base64(image: Image.Image, format: str = "PNG") -> str:
    """Serialize *image* via PIL and return the bytes as a base64 string."""
    buf = io.BytesIO()
    image.save(buf, format=format)
    raw = buf.getvalue()
    return base64.b64encode(raw).decode('utf-8')


def decode_base64_image(base64_string: str) -> Image.Image:
    """Decode a base64-encoded image into an RGB PIL image.

    Accepts either a bare base64 payload or a full data URL of the form
    ``data:image/<fmt>;base64,<payload>`` (used e.g. for reference images).

    Raises:
        binascii.Error: if the payload is not valid base64.
    """
    if base64_string.startswith('data:image'):
        # Split only on the FIRST comma so any stray comma later in the
        # string does not silently truncate the payload.
        base64_string = base64_string.split(',', 1)[1]
    image_data = base64.b64decode(base64_string)
    return Image.open(io.BytesIO(image_data)).convert('RGB')


# -----------------------------------------------------------
# API endpoints
# -----------------------------------------------------------

@app.get("/health")
async def health_check():
    """Liveness probe: report service status, model availability and device."""
    loaded = inference_engine is not None
    dev = str(device) if device else "unknown"
    return {"status": "ok", "model_loaded": loaded, "device": dev}


@app.get("/v1/models")
async def list_models():
    """列出可用模型"""
    return {
        "object": "list",
        "data": [{
            "id": "z-image-turbo",
            "object": "model",
            "created": int(time.time()),
            "owned_by": "tongyi-mai",
            "capabilities": ["text-to-image", "image-generation"]
        }]
    }


@app.post("/v1/images/generations")
async def generate_image(request: ImageGenerationRequest):
    """
    图像生成接口 - OpenAI 兼容格式
    
    **示例调用:**
    ```bash
    curl -X POST http://localhost:8000/v1/images/generations \\
      -H "Content-Type: application/json" \\
      -d '{
        "prompt": "A beautiful sunset over the ocean",
        "height": 1024,
        "width": 1024,
        "num_inference_steps": 9,
        "guidance_scale": 0.0,
        "seed": 42
      }'
    ```
    """
    if inference_engine is None:
        raise HTTPException(status_code=503, detail="Model not loaded")
    
    ensure_npu_context()
    
    start_time = time.time()
    
    try:
        # 生成图像
        image = inference_engine.generate_image(
            prompt=request.prompt,
            height=request.height or 1024,
            width=request.width or 1024,
            num_inference_steps=request.num_inference_steps or 9,
            guidance_scale=request.guidance_scale or 0.0,
            seed=request.seed
        )
        
        processing_time = time.time() - start_time
        
        # 根据返回格式处理
        if request.return_base64:
            image_base64 = encode_image_base64(image, request.image_format)
            return {
                "created": int(time.time()),
                "data": [{
                    "b64_json": image_base64,
                    "revised_prompt": request.prompt
                }],
                "processing_time": round(processing_time, 2)
            }
        else:
            # 返回 base64 URL 格式（OpenAI 兼容）
            image_base64 = encode_image_base64(image, request.image_format)
            mime_type = f"image/{request.image_format.lower()}"
            image_url = f"data:{mime_type};base64,{image_base64}"
            
            return {
                "created": int(time.time()),
                "data": [{
                    "url": image_url,
                    "revised_prompt": request.prompt
                }],
                "processing_time": round(processing_time, 2)
            }
        
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/v1/generate")
async def generate_image_simple(
    prompt: str = Form(..., description="文本提示"),
    height: int = Form(default=1024, description="图像高度"),
    width: int = Form(default=1024, description="图像宽度"),
    num_inference_steps: int = Form(default=9, description="推理步数"),
    guidance_scale: float = Form(default=0.0, description="Guidance scale"),
    seed: Optional[int] = Form(default=None, description="随机种子"),
    return_base64: bool = Form(default=False, description="是否返回 base64")
):
    """
    简化的图像生成接口 - 使用 Form 数据
    
    **示例调用:**
    ```bash
    curl -X POST http://localhost:8000/v1/generate \\
      -F "prompt=A beautiful sunset over the ocean" \\
      -F "height=1024" \\
      -F "width=1024" \\
      -F "seed=42"
    ```
    """
    if inference_engine is None:
        raise HTTPException(status_code=503, detail="Model not loaded")
    
    ensure_npu_context()
    
    start_time = time.time()
    
    try:
        image = inference_engine.generate_image(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            seed=seed
        )
        
        processing_time = time.time() - start_time
        
        if return_base64:
            image_base64 = encode_image_base64(image)
            return {
                "status": "success",
                "image_base64": image_base64,
                "processing_time": round(processing_time, 2)
            }
        else:
            # 返回图像文件
            buffer = io.BytesIO()
            image.save(buffer, format="PNG")
            buffer.seek(0)
            return StreamingResponse(
                buffer,
                media_type="image/png",
                headers={
                    "X-Processing-Time": str(round(processing_time, 2))
                }
            )
        
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/v1/generate/batch")
async def generate_images_batch(request: BatchImageGenerationRequest):
    """
    批量图像生成接口
    
    **示例调用:**
    ```bash
    curl -X POST http://localhost:8000/v1/generate/batch \\
      -H "Content-Type: application/json" \\
      -d '{
        "prompts": [
          "A beautiful sunset over the ocean",
          "A cat sitting on a windowsill"
        ],
        "height": 1024,
        "width": 1024
      }'
    ```
    """
    if inference_engine is None:
        raise HTTPException(status_code=503, detail="Model not loaded")
    
    ensure_npu_context()
    
    total_start_time = time.time()
    results = []
    
    try:
        for i, prompt in enumerate(request.prompts):
            try:
                seed = None
                if request.seeds and i < len(request.seeds):
                    seed = request.seeds[i]
                
                start_time = time.time()
                image = inference_engine.generate_image(
                    prompt=prompt,
                    height=request.height or 1024,
                    width=request.width or 1024,
                    num_inference_steps=request.num_inference_steps or 9,
                    guidance_scale=request.guidance_scale or 0.0,
                    seed=seed
                )
                processing_time = time.time() - start_time
                
                if request.return_base64:
                    image_base64 = encode_image_base64(image, request.image_format)
                    results.append({
                        "index": i,
                        "prompt": prompt,
                        "status": "success",
                        "image_base64": image_base64,
                        "processing_time": round(processing_time, 2)
                    })
                else:
                    image_base64 = encode_image_base64(image, request.image_format)
                    mime_type = f"image/{request.image_format.lower()}"
                    image_url = f"data:{mime_type};base64,{image_base64}"
                    results.append({
                        "index": i,
                        "prompt": prompt,
                        "status": "success",
                        "url": image_url,
                        "processing_time": round(processing_time, 2)
                    })
                
            except Exception as e:
                results.append({
                    "index": i,
                    "prompt": prompt,
                    "status": "error",
                    "error": str(e)
                })
        
        total_time = time.time() - total_start_time
        
        return {
            "status": "completed",
            "total_prompts": len(request.prompts),
            "successful": sum(1 for r in results if r["status"] == "success"),
            "failed": sum(1 for r in results if r["status"] == "error"),
            "total_processing_time": round(total_time, 2),
            "results": results
        }
        
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


# -----------------------------------------------------------
# Entrypoint
# -----------------------------------------------------------
if __name__ == "__main__":
    # The listen port is configurable via $PORT (default 8000).
    serve_port = int(os.environ.get("PORT", "8000"))
    uvicorn.run(app, host="0.0.0.0", port=serve_port)

