import os
import time
import json
import requests
import torch
import torch.utils._pytree as _pytree
import uvicorn
import threading
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List, Optional, Union, Dict, Any
from PIL import Image
import io
import base64
import tempfile
import numpy as np
import av  # PyAV 替代 decord 进行视频处理

# ========== NPU device configuration ==========
# Index of the Ascend NPU card this service binds to.
NPU_DEVICE_ID = 0
# torch device string for that card, e.g. "npu:0".
NPU_NAME = f"npu:{NPU_DEVICE_ID}"

# ========== Patch threading.Thread so new threads inherit the NPU context ==========
_OriginalThread = threading.Thread

class NPUContextThread(_OriginalThread):
    """
    Thread subclass that binds the Ascend NPU context before the target runs.

    transformers spawns worker threads internally; without this patch those
    threads have no NPU context and device operations fail. A failure to set
    the context is logged but never fatal, so the thread still executes.
    """
    def run(self):
        # Bind this thread to the NPU device before running the target.
        try:
            torch.npu.set_device(NPU_NAME)
        except Exception as e:
            print(f"[NPUContextThread] Warning: Failed to set NPU context: {e}")
        super().run()

# Install the subclass as the process-wide Thread class.
threading.Thread = NPUContextThread

# ========== PyTree compatibility shim ==========
# Older torch builds expose only the private ``_register_pytree_node``,
# while newer transformers code calls the public ``register_pytree_node``
# (which also accepts extra keyword arguments). Bridge the gap with a
# wrapper that forwards the three positional args and drops the kwargs
# the old API does not understand.
if not hasattr(_pytree, 'register_pytree_node'):
    _orig = _pytree._register_pytree_node
    def _compat(cls, flatten_fn, unflatten_fn, **_):
        # Old signature takes only the three positional arguments.
        return _orig(cls, flatten_fn, unflatten_fn)
    _pytree.register_pytree_node = _compat

# ========== Transformers ==========
from transformers import AutoModel, AutoTokenizer

# ========== FastAPI ==========
app = FastAPI(title="MiniCPM-V 4.5 NPU API")

# ========== Global model state ==========
# Populated by load_model() during startup; both stay None until then.
model = None
tokenizer = None

# ========== 数据结构 ==========
class ChatMessage(BaseModel):
    """One chat message in OpenAI format."""
    # e.g. "user" / "assistant" / "system" (not validated here)
    role: str
    # Either plain text, or a list of typed content parts such as
    # {"type": "text"|"image_url"|"video_url", ...}.
    content: Union[str, List[Dict[str, Any]]]

class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible subset)."""
    model: str
    messages: List[ChatMessage]
    # When True, the response is streamed as SSE chunks.
    stream: Optional[bool] = False
    max_tokens: Optional[int] = 1024
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.8

# -----------------------------------------------------------
# 工具函数
# -----------------------------------------------------------

def ensure_npu_context() -> None:
    """Bind the Ascend NPU device for the calling thread.

    Every thread must set the NPU context once before issuing device ops.
    """
    torch.npu.set_device(NPU_NAME)

def decode_base64_image(base64_string):
    """Decode a base64 string (optionally a data-URI) into an RGB PIL image."""
    if base64_string.startswith('data:image'):
        # Strip the "data:image/...;base64," prefix, keeping only the payload.
        base64_string = base64_string.split(',', 1)[1]
    raw_bytes = base64.b64decode(base64_string)
    buffer = io.BytesIO(raw_bytes)
    return Image.open(buffer).convert('RGB')

# Video frame-sampling parameters.
# MiniCPM-V 4.5 must cap the number of frames when handling many images,
# otherwise image_bounds tensor concatenation fails; official guidance
# recommends 16-32 frames per video.
MAX_NUM_FRAMES = 32  # max sampled frames; tune to available device memory
# All frames are resized to a single resolution so every frame produces the
# same number of slices (mixed resolutions break tensor concatenation).
VIDEO_FRAME_SIZE = (448, 448)

def encode_video(video_path_or_url: str) -> list:
    """Uniformly sample frames from a video and return them as PIL.Images.

    Uses PyAV for decoding (replacing decord). Per the MiniCPM-V 4.5 docs,
    a video is passed to model.chat() as a list of frames.

    Args:
        video_path_or_url: local path, http(s) URL, or ``data:video`` URI.

    Returns:
        List of RGB PIL.Image frames, all resized to VIDEO_FRAME_SIZE —
        uniform sizes are required or tensor concatenation fails downstream.

    Fix: every ``av.open`` container is now closed via try/finally. The
    original opened a container for metadata and never closed it before
    reopening for decoding, and leaked containers on exceptions.
    """
    temp_file = None
    try:
        # Materialize remote or base64-encoded input into a local temp file.
        if video_path_or_url.startswith(('http://', 'https://')):
            print(f"[Video] Downloading video from URL...")
            response = requests.get(video_path_or_url, timeout=60)
            response.raise_for_status()
            temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
            temp_file.write(response.content)
            temp_file.close()
            video_path = temp_file.name
        elif video_path_or_url.startswith('data:video'):
            # Base64-encoded video payload.
            print(f"[Video] Decoding base64 video...")
            base64_data = video_path_or_url.split(',')[1]
            video_data = base64.b64decode(base64_data)
            temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
            temp_file.write(video_data)
            temp_file.close()
            video_path = temp_file.name
        else:
            video_path = video_path_or_url

        print(f"[Video] Loading video: {video_path}")

        # First pass: probe metadata (resolution and declared frame count).
        container = av.open(video_path)
        try:
            stream = container.streams.video[0]
            video_width = stream.width
            video_height = stream.height
            print(f"[Video] Original resolution: {video_width}x{video_height}")
            total_frames = stream.frames
        finally:
            container.close()

        if total_frames == 0:
            # Some container formats don't report a frame count; count by decoding.
            container_temp = av.open(video_path)
            try:
                total_frames = sum(1 for _ in container_temp.decode(video=0))
            finally:
                container_temp.close()

        print(f"[Video] Total frames: {total_frames}")

        # Choose the frame indices to keep: everything when short enough,
        # otherwise MAX_NUM_FRAMES indices spread evenly across the video.
        if total_frames <= MAX_NUM_FRAMES:
            frame_indices = set(range(total_frames))
        else:
            frame_indices = set(np.linspace(0, total_frames - 1, MAX_NUM_FRAMES, dtype=int).tolist())

        print(f"[Video] Sampling {len(frame_indices)} frames")
        print(f"[Video] Target frame size: {VIDEO_FRAME_SIZE}")

        # Second pass: decode sequentially and collect the sampled frames.
        pil_frames = []
        container = av.open(video_path)
        try:
            frame_idx = 0
            for frame in container.decode(video=0):
                if frame_idx in frame_indices:
                    img = frame.to_image().convert('RGB')
                    # Normalize every frame to the standard size so all
                    # frames produce the same slice count.
                    if img.size != VIDEO_FRAME_SIZE:
                        img = img.resize(VIDEO_FRAME_SIZE, Image.LANCZOS)
                    pil_frames.append(img)
                frame_idx += 1
                if len(pil_frames) >= len(frame_indices):
                    break
        finally:
            container.close()

        print(f"[Video] Extracted {len(pil_frames)} frames successfully (all {VIDEO_FRAME_SIZE})")
        return pil_frames

    finally:
        # Remove the downloaded/decoded temp file, success or failure.
        if temp_file and os.path.exists(temp_file.name):
            os.unlink(temp_file.name)

# -----------------------------------------------------------
# 模型加载
# -----------------------------------------------------------
def load_model():
    """Load the MiniCPM-V model and tokenizer onto the NPU and warm them up.

    Mutates the module-level ``model`` and ``tokenizer`` globals. Reads the
    ``MODEL_PATH`` env var and falls back to the HF hub id when the local
    path does not exist. Warmup failures are logged but non-fatal.
    """
    global model, tokenizer

    model_path = os.getenv("MODEL_PATH", "/app/models/MiniCPM-V-4_5")
    if not os.path.exists(model_path):
        print(f"[Warning] Local model not found. Using HF hub: openbmb/MiniCPM-V-4_5")
        model_path = "openbmb/MiniCPM-V-4_5"

    print(f"[Init] Loading model from {model_path} ...")

    # Bind the NPU context in this thread before any device work.
    ensure_npu_context()

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # bfloat16 + SDPA attention; trust_remote_code is required because
    # MiniCPM-V ships custom modeling code.
    model = AutoModel.from_pretrained(
        model_path,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        attn_implementation='sdpa'
    ).eval()

    model = model.to(NPU_NAME)

    print("[Init] Model loaded on", NPU_NAME)
    
    # Warm up the model (compiles the NPU graph to cut first-inference
    # latency). Run both generation paths so each gets compiled.
    print("[Init] Warming up model (compiling NPU graph)...", flush=True)
    try:
        warmup_msgs = [{"role": "user", "content": ["Hello"]}]
        
        # First warmup: non-streaming path (compiles the graph).
        with torch.no_grad():
            _ = model.chat(
                msgs=warmup_msgs,
                tokenizer=tokenizer,
                sampling=True,
                stream=False,
                max_new_tokens=2
            )
        
        # Second warmup: streaming path, so it is compiled as well.
        with torch.no_grad():
            warmup_stream = model.chat(
                msgs=warmup_msgs,
                tokenizer=tokenizer,
                sampling=True,
                stream=True,
                max_new_tokens=2
            )
            # Consume one item to trigger the streaming machinery.
            for _ in warmup_stream:
                break
        
        print("[Init] Model warmup completed (NPU graph compiled)", flush=True)
    except Exception as e:
        print(f"[Init] Warmup failed (non-critical): {e}", flush=True)
        import traceback
        traceback.print_exc()

# -----------------------------------------------------------
# 流式输出（优化版 - 减少首 token 延迟）
# -----------------------------------------------------------
def generate_stream(msgs, model_params):
    """Yield OpenAI-style SSE chunks for a streaming chat completion.

    Args:
        msgs: MiniCPM-V messages, ``[{"role": ..., "content": [...]}]``.
        model_params: generation kwargs (max_new_tokens/temperature/top_p).

    Yields:
        ``data: ...`` SSE lines — content chunks, a final stop chunk, then
        the ``[DONE]`` sentinel. Errors are emitted as a JSON error event.

    Fix: all chunks of one stream now share a single id/created pair, as
    the OpenAI streaming protocol requires (previously every chunk was
    stamped with a fresh timestamp-based id).
    """
    # The NPU context must be bound in this thread before generating.
    ensure_npu_context()
    model.to(NPU_NAME)

    # Cap the overall sequence length whenever max_new_tokens is requested.
    optimized_params = model_params.copy()
    if "max_new_tokens" in optimized_params:
        optimized_params["max_length"] = 4096

    # Pure-text conversations need no max_slice_nums restriction.
    has_images = any(
        isinstance(item, Image.Image)
        for msg in msgs
        for item in msg.get("content", [])
    )

    # One id / created timestamp shared by every chunk of this stream.
    created = int(time.time())
    stream_id = f"chatcmpl-{created}"

    try:
        # Record start time for latency reporting.
        start_time = time.time()

        # Streaming generation; no_grad trims memory use and speeds things up.
        with torch.no_grad():
            res = model.chat(
                msgs=msgs,
                tokenizer=tokenizer,
                sampling=True,
                stream=True,
                max_slice_nums=1 if has_images else None,  # limit slicing only when images are present
                **optimized_params
            )

        first_token_time = None
        token_count = 0

        for new_text in res:
            # Report first-token latency once.
            if first_token_time is None:
                first_token_time = time.time()
                elapsed = first_token_time - start_time
                print(f"[Stream] First token latency: {elapsed:.2f}s", flush=True)

            token_count += 1

            chunk = {
                "id": stream_id,
                "object": "chat.completion.chunk",
                "created": created,
                "model": "MiniCPM-V-4_5",
                "choices": [{
                    "index": 0,
                    "delta": {"content": new_text},
                    "finish_reason": None
                }]
            }
            yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"

        end_chunk = {
            "id": stream_id,
            "object": "chat.completion.chunk",
            "created": created,
            "model": "MiniCPM-V-4_5",
            "choices": [{
                "index": 0,
                "delta": {},
                "finish_reason": "stop"
            }]
        }
        yield f"data: {json.dumps(end_chunk, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"

        total_time = time.time() - start_time
        print(f"[Stream] Total tokens: {token_count}, Total time: {total_time:.2f}s", flush=True)

    except Exception as e:
        print(f"[Stream Error] {e}", flush=True)
        import traceback
        traceback.print_exc()
        yield f"data: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n"

# -----------------------------------------------------------
# API 入口
# -----------------------------------------------------------
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    """OpenAI-compatible chat-completions endpoint.

    Converts OpenAI-style messages (text / image_url / video_url parts)
    into MiniCPM-V ``model.chat`` messages, then answers either as a
    streaming SSE response or a single JSON completion.

    Fix: image downloads now use a bounded timeout and check the HTTP
    status, so a dead image host cannot hang the request forever.

    NOTE(review): requests.get / model.chat are blocking calls inside an
    async handler — they block the event loop; consider run_in_executor
    if concurrent requests matter. TODO confirm.
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model is not initialized")

    # Track request preprocessing latency.
    request_start_time = time.time()
    
    minicpm_msgs = []

    for msg in request.messages:
        role = msg.role
        content = msg.content

        processed = []

        if isinstance(content, str):
            # Plain-text message.
            processed.append(content)

        elif isinstance(content, list):
            for item in content:
                if item["type"] == "text":
                    processed.append(item["text"])

                elif item["type"] == "image_url":
                    url = item["image_url"]["url"]
                    if url.startswith("data:image"):
                        processed.append(decode_base64_image(url))
                    else:
                        # Bounded timeout + status check: a slow or failing
                        # image host must not stall the whole request.
                        resp = requests.get(url, timeout=30)
                        resp.raise_for_status()
                        img = Image.open(io.BytesIO(resp.content)).convert("RGB")
                        processed.append(img)

                elif item["type"] == "video_url":
                    # Video input: decode into a frame list and splice the
                    # frames into content — MiniCPM-V 4.5 expects frames as
                    # individual list elements.
                    video_url = item["video_url"]["url"]
                    print(f"[API] Processing video input...")
                    video_frames = encode_video(video_url)
                    processed.extend(video_frames)
                    print(f"[API] Added {len(video_frames)} video frames to content")

        minicpm_msgs.append({"role": role, "content": processed})

    model_params = {
        "max_new_tokens": request.max_tokens,
        "temperature": request.temperature,
        "top_p": request.top_p
    }

    # ----------- Streaming ----------
    if request.stream:
        prep_time = time.time() - request_start_time
        print(f"[API] Request preprocessing time: {prep_time:.3f}s", flush=True)
        
        # Return the streaming response immediately to minimize latency.
        return StreamingResponse(
            generate_stream(minicpm_msgs, model_params),
            media_type="text/event-stream"
        )

    # ----------- Non-streaming ----------
    ensure_npu_context()
    model.to(NPU_NAME)

    output = model.chat(
        msgs=minicpm_msgs,
        tokenizer=tokenizer,
        sampling=True,
        stream=False,
        max_slice_nums=1,  # cap slices: avoids image_bounds tensor mismatch with many frames
        **model_params
    )

    return {
        "id": f"chatcmpl-{int(time.time())}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": "MiniCPM-V-4_5",
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": output},
            "finish_reason": "stop"
        }],
        # Token accounting is not surfaced by model.chat; report -1.
        "usage": {"prompt_tokens": -1, "completion_tokens": -1, "total_tokens": -1}
    }

# -----------------------------------------------------------
# 健康检查
# -----------------------------------------------------------
@app.get("/health")
async def health_check():
    """Liveness probe: reports service status and whether the model is loaded."""
    loaded = model is not None
    return {"status": "ok", "model_loaded": loaded}

# -----------------------------------------------------------
# 启动时加载模型
# -----------------------------------------------------------
# NOTE(review): @app.on_event is deprecated in newer FastAPI in favor of
# lifespan handlers — verify against the pinned FastAPI version.
@app.on_event("startup")
async def startup_event():
    """FastAPI startup hook: load model and tokenizer before serving traffic."""
    load_model()

# -----------------------------------------------------------
# 运行
# -----------------------------------------------------------
if __name__ == "__main__":
    # Direct entry point: serve on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)