"""
HunyuanOCR NPU REST API
基于腾讯混元 HunyuanOCR 模型
参考: https://huggingface.co/spaces/tencent/HunyuanOCR

支持: 图像OCR、文档OCR、多语言文本识别
"""

import os
import sys
import time
import json
import uuid
import io
import base64
import gc
import threading
from pathlib import Path
from typing import List, Optional, Dict, Any, Union

import numpy as np
from PIL import Image

import torch
import math

# ========== PyTree 兼容性补丁（必须在其他导入之前）==========
import torch.utils._pytree as _pytree
# torch 2.1.0 only exposes the private `_register_pytree_node`, while newer
# transformers releases call the public `register_pytree_node`. Install a
# thin adapter under the public name, unconditionally (no hasattr check, to
# avoid AttributeError paths).
# NOTE(review): this overwrites any existing public `register_pytree_node`;
# fine on torch 2.1.0, but it would clobber the real implementation on newer
# torch builds — confirm the pinned torch version.
_orig_register = _pytree._register_pytree_node
def _compat_register(cls, flatten_fn, unflatten_fn, **_):
    # Newer callers pass extra kwargs (e.g. serialized_type_name); the old
    # private API does not accept them, so they are dropped here.
    return _orig_register(cls, flatten_fn, unflatten_fn)
_pytree.register_pytree_node = _compat_register

import torchvision.transforms as transforms
from transformers.image_utils import make_list_of_images, ChannelDimension
from transformers.image_transforms import convert_to_rgb

import uvicorn
from fastapi import FastAPI, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel

# ========== NPU device configuration ==========
# Which Ascend NPU card to use. Now configurable through the NPU_DEVICE_ID
# environment variable for consistency with MODEL_PATH / PORT / USE_BF16;
# the default of 0 matches the previous hard-coded value.
NPU_DEVICE_ID = int(os.getenv("NPU_DEVICE_ID", "0"))
NPU_NAME = f"npu:{NPU_DEVICE_ID}"
_npu_initialized = False  # set once the NPU context has been established

# ========== Patch threading.Thread so new threads pick up the NPU context ==========
_OriginalThread = threading.Thread  # keep a reference to the unpatched class

class NPUContextThread(_OriginalThread):
    """threading.Thread subclass that binds the NPU device in newly started threads.

    Huawei Ascend NPUs must not be initialized twice, so the device is only
    set while the module-level `_npu_initialized` flag is still False.
    NOTE(review): the flag is not updated here (only init_npu_once() sets it),
    so every thread started before init_npu_once() runs will attempt
    set_device once — confirm this is the intended behavior.
    """
    def run(self):
        global _npu_initialized
        if not _npu_initialized:
            try:
                import torch_npu
                torch.npu.set_device(NPU_NAME)
            except Exception as e:
                # "Repeated initialization" / error 100002 means the device is
                # already set up; anything else is worth a warning.
                if "Repeated initialization" not in str(e) and "100002" not in str(e):
                    print(f"[NPUContextThread] Warning: {e}")
        super().run()

# Install globally so threads spawned by third-party code (e.g. streamers)
# are covered as well.
threading.Thread = NPUContextThread

# ========== Lifespan event handling ==========
from contextlib import asynccontextmanager

@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: load the model once at startup."""
    # Load model weights before the server starts accepting requests.
    load_model()
    yield
    # Nothing to release on shutdown (the model lives for the whole process).

# ========== FastAPI application ==========
app = FastAPI(
    title="HunyuanOCR NPU API",
    description="腾讯混元 OCR 模型 API - 支持图像OCR、文档OCR、多语言文本识别",
    version="1.0.0",
    lifespan=lifespan
)

# ========== Global model state (populated by load_model() at startup) ==========
model = None      # HunYuanVLForConditionalGeneration instance, or None if loading failed
processor = None  # AutoProcessor paired with the model
device = None     # torch.device the model lives on (npu / cuda / cpu)

# ========== Request/response schemas ==========
class OCRRequest(BaseModel):
    """Parameters for a direct OCR request.

    NOTE(review): this model is not referenced by any visible endpoint
    (the /v1/ocr route takes Form fields instead) — confirm it is still needed.
    """
    image: str  # base64 encoded image
    prompt: Optional[str] = "检测并识别图片中的文字，将文本坐标格式化输出。"  # default OCR instruction
    max_tokens: Optional[int] = 16384   # generation budget
    temperature: Optional[float] = 0.0  # 0 disables sampling
    top_p: Optional[float] = 0.001      # nucleus cutoff, used only when sampling

class ChatMessage(BaseModel):
    """One chat message: role plus either plain text or a list of content parts."""
    role: str
    content: Union[str, List[Dict[str, Any]]]

class ChatCompletionRequest(BaseModel):
    """OpenAI-compatible /v1/chat/completions request body."""
    model: str = "hunyuan-ocr"
    messages: List[ChatMessage]
    stream: Optional[bool] = False       # SSE streaming when True
    max_tokens: Optional[int] = 16384    # generation budget
    temperature: Optional[float] = 0.0   # 0 disables sampling
    top_p: Optional[float] = 0.001       # nucleus cutoff, used only when sampling


# -----------------------------------------------------------
# NPU 上下文管理
# -----------------------------------------------------------
def init_npu_once():
    """Initialize the NPU device exactly once per process.

    Returns:
        bool: True when the NPU context is (or already was) set up, False
        when initialization failed for a reason other than the Ascend
        "repeated initialization" condition.
    """
    global _npu_initialized
    # Fast path: a previous call already succeeded.
    if _npu_initialized:
        return True

    try:
        import torch_npu
        torch.npu.set_device(NPU_NAME)
    except Exception as e:
        # Ascend error 100002 / "Repeated initialization" means some other
        # code path already bound the device — treat that as success.
        message = str(e)
        if "Repeated initialization" in message or "100002" in message:
            _npu_initialized = True
            return True
        print(f"[Warning] Failed to init NPU: {e}")
        return False

    _npu_initialized = True
    print(f"[Init] NPU initialized: {NPU_NAME}")
    return True


def ensure_npu_context():
    """Ensure the NPU context is set for the current process (idempotent)."""
    init_npu_once()


# -----------------------------------------------------------
# Monkey Patch for HunyuanVLImageProcessor
# -----------------------------------------------------------
def smart_resize(
    height: int, width: int, factor: int = 16, min_pixels: int = 512 * 512, max_pixels: int = 2048 * 2048
):
    """Pick output dimensions for an image such that:

    1. both dimensions are divisible by ``factor``;
    2. the total pixel count lies within [``min_pixels``, ``max_pixels``];
    3. the aspect ratio stays as close to the original as possible.

    Returns:
        tuple[int, int]: the adjusted (height, width).

    Raises:
        ValueError: when the aspect ratio exceeds 200:1.
    """
    if max(height, width) / min(height, width) > 200:
        raise ValueError(
            f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
        )
    # Start from the nearest multiples of `factor`.
    new_h = round(height / factor) * factor
    new_w = round(width / factor) * factor
    area = new_h * new_w
    if area > max_pixels:
        # Too large: shrink both sides by the same ratio, rounding down,
        # but never below a single `factor` unit.
        scale = math.sqrt((height * width) / max_pixels)
        new_h = max(factor, math.floor(height / scale / factor) * factor)
        new_w = max(factor, math.floor(width / scale / factor) * factor)
    elif area < min_pixels:
        # Too small: grow both sides by the same ratio, rounding up.
        scale = math.sqrt(min_pixels / (height * width))
        new_h = math.ceil(height * scale / factor) * factor
        new_w = math.ceil(width * scale / factor) * factor
    return new_h, new_w


def _preprocess_patched(
    self,
    images,
    do_resize=None,
    size=None,
    resample=None,
    do_rescale=None,
    rescale_factor=None,
    do_normalize=None,
    image_mean=None,
    image_std=None,
    patch_size=None,
    temporal_patch_size=None,
    merge_size=None,
    do_convert_rgb=None,
    data_format=None,
    input_data_format=None,
):
    """
    Patched _preprocess method to handle tensor to numpy conversion correctly.

    Installed over HunyuanVLImageProcessor._preprocess via types.MethodType in
    load_model(). Resizes/normalizes the input image(s) and flattens them into
    ViT-style patches.

    Returns:
        tuple: (flatten_patches, (grid_t, grid_h, grid_w)) where
        flatten_patches has shape (grid_h * grid_w, channel * patch_size**2).

    NOTE(review): `resample`, `do_rescale`, `rescale_factor`, `image_mean`,
    `image_std`, `data_format` and `input_data_format` are accepted but
    ignored; normalization always uses `self.image_mean`/`self.image_std`.
    The final reshape hardcodes a leading 1, so this effectively assumes a
    single image/frame per call — confirm for multi-image inputs.
    """
    images = make_list_of_images(images)

    if do_convert_rgb:
        images = [convert_to_rgb(image) for image in images]

    # Dimensions of the first image; all images in one call are assumed equal.
    width, height = images[0].width, images[0].height
    resized_width, resized_height = width, height
    processed_images = []
    for image in images:
        if do_resize:
            # NOTE: width is passed as smart_resize's `height` argument and
            # vice versa; the swap is mirrored on the return values, so each
            # output dimension is still derived from the matching input one.
            resized_width, resized_height = smart_resize(
                width,
                height,
                factor=patch_size * merge_size,
                min_pixels=size["shortest_edge"],
                max_pixels=size["longest_edge"],
            )
            image = image.resize((resized_width, resized_height))

        if do_normalize:
            # ToTensor also rescales pixel values to [0, 1] before normalizing.
            image = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(self.image_mean, self.image_std)
            ])(image)
        processed_images.append(image)

    # FIX: Handle list of tensors
    if processed_images and isinstance(processed_images[0], torch.Tensor):
        patches = torch.stack(processed_images).numpy()
    else:
        # NOTE(review): when do_normalize is falsy the images stay PIL, and
        # np.array() yields HWC-ordered data, making `channel` below wrong —
        # presumably this path always runs with do_normalize=True; confirm.
        patches = np.array(processed_images)

    channel = patches.shape[1]
    grid_t = patches.shape[0] // temporal_patch_size
    grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
    # Split each image into (merge_size x merge_size) groups of patches,
    # then bring the channel/patch axes adjacent before flattening.
    patches = patches.reshape(
        1,
        channel,
        grid_h // merge_size,
        merge_size,
        patch_size,
        grid_w // merge_size,
        merge_size,
        patch_size,
    )
    patches = patches.transpose(0, 2, 3, 5, 6, 1, 4, 7)
    flatten_patches = patches.reshape( 1 * grid_h * grid_w, channel * patch_size * patch_size)

    return flatten_patches, (grid_t, grid_h, grid_w)


# -----------------------------------------------------------
# Model loading (based on the HunyuanWorld-Mirror-Ascend implementation)
# -----------------------------------------------------------
def load_model():
    """Load the HunyuanOCR model and processor into the module globals.

    Populates `model`, `processor` and `device`. On any failure the error is
    printed and `model`/`processor` are reset to None so the endpoints can
    answer 503 instead of crashing.
    """
    global model, processor, device, _npu_initialized

    # Pick the best available device: NPU > CUDA > CPU.
    try:
        import torch_npu
        if torch.npu.is_available():
            # Initialize the NPU context (runs only once per process).
            init_npu_once()
            device = torch.device(NPU_NAME)
            print(f"[Init] Using NPU device: {NPU_NAME}")
        else:
            device = torch.device("cpu")
            print("[Init] NPU not available, using CPU")
    except ImportError:
        # torch_npu is absent: fall back to CUDA, then CPU.
        if torch.cuda.is_available():
            device = torch.device("cuda")
            print("[Init] Using CUDA device")
        else:
            device = torch.device("cpu")
            print("[Init] Using CPU device")

    # Try to load the model; any failure leaves the globals as None.
    try:
        print("[Init] Importing transformers...", flush=True)
        from transformers import HunYuanVLForConditionalGeneration, AutoProcessor

        # Prefer a local model directory; fall back to the HuggingFace hub.
        model_path = os.getenv("MODEL_PATH", "/data0/HunyuanOCR")

        if os.path.exists(model_path) and os.path.isdir(model_path):
            print(f"[Init] Loading model from local path: {model_path}", flush=True)
        else:
            print(f"[Init] Local model not found at {model_path}", flush=True)
            print("[Init] Trying to download from HuggingFace: tencent/HunyuanOCR", flush=True)
            model_path = "tencent/HunyuanOCR"

        # Load the processor (the official docs require use_fast=False).
        print("[Init] Loading processor...", flush=True)
        processor = AutoProcessor.from_pretrained(
            model_path,
            use_fast=False,
            trust_remote_code=True
        )
        print("[Init] Processor loaded.", flush=True)

        # Install the monkey patch (see _preprocess_patched above).
        import types
        if hasattr(processor, "image_processor"):
            processor.image_processor._preprocess = types.MethodType(_preprocess_patched, processor.image_processor)
            print("[Init] Patched processor.image_processor._preprocess", flush=True)

        # Choose the dtype based on the device type.
        use_bf16 = os.getenv("USE_BF16", "true").lower() == "true"

        if 'npu' in str(device):
            dtype = torch.bfloat16 if use_bf16 else torch.float16
        elif 'cuda' in str(device):
            dtype = torch.bfloat16
        else:
            dtype = torch.float32

        print(f"[Init] Loading model with dtype: {dtype}", flush=True)

        # Load the model (HunYuanVLForConditionalGeneration, per official docs).
        print("[Init] Loading model weights (this may take a while)...", flush=True)
        model = HunYuanVLForConditionalGeneration.from_pretrained(
            model_path,
            attn_implementation="eager",  # required by the official docs
            dtype=dtype,
            device_map="auto" if 'cuda' in str(device) else None,
            trust_remote_code=True
        ).eval()
        print("[Init] Model weights loaded.", flush=True)

        # Move the weights onto the selected device.
        print(f"[Init] Moving model to {device}...", flush=True)
        model = model.to(device)
        print("[Init] Model moved to device.", flush=True)

        # Reclaim memory used during loading.
        if 'npu' in str(device):
            torch.npu.empty_cache()
        gc.collect()

        print(f"[Init] Model loaded successfully on {device}, dtype={next(model.parameters()).dtype}", flush=True)

    except Exception as e:
        print(f"[Error] Could not load model: {e}", flush=True)
        import traceback
        traceback.print_exc()
        model = None
        processor = None


# -----------------------------------------------------------
# 图片处理工具
# -----------------------------------------------------------
def decode_base64_image(base64_string: str) -> Image.Image:
    """Decode a base64 string (optionally a data-URI) into an RGB PIL image."""
    if base64_string.startswith('data:image'):
        # Drop the "data:image/...;base64," data-URI header.
        base64_string = base64_string.split(',')[1]
    raw_bytes = base64.b64decode(base64_string)
    return Image.open(io.BytesIO(raw_bytes)).convert('RGB')


def encode_image_base64(image: Image.Image) -> str:
    """Serialize a PIL image to a base64-encoded PNG string."""
    png_buffer = io.BytesIO()
    image.save(png_buffer, format='PNG')
    return base64.b64encode(png_buffer.getvalue()).decode('utf-8')


def load_image_from_url(url: str) -> Image.Image:
    """Download an image over HTTP(S) and return it as an RGB PIL image.

    Args:
        url: Address of the image to fetch.

    Returns:
        PIL.Image: The downloaded picture, converted to RGB.

    Raises:
        ValueError: The download failed or the payload could not be decoded
            as an image.
    """
    import requests

    try:
        # Some hosts reject clients without a browser-like User-Agent.
        browser_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        resp = requests.get(url, timeout=30, headers=browser_headers, stream=True)
        resp.raise_for_status()

        # Warn (but do not fail) when the server reports a non-image payload.
        mime = resp.headers.get('content-type', '').lower()
        if not mime.startswith('image/'):
            print(f"[Warning] URL content-type is not image: {mime}", flush=True)

        return Image.open(io.BytesIO(resp.content)).convert('RGB')

    except requests.exceptions.RequestException as e:
        raise ValueError(f"Failed to download image from URL: {str(e)}")
    except Exception as e:
        raise ValueError(f"Failed to process image from URL: {str(e)}")


def clean_repeated_substrings(text):
    """Trim runaway trailing repetitions from model output.

    Long generations occasionally degenerate into one short substring
    repeated over and over at the end of the text. For texts of at least
    8000 characters, look for a suffix unit (length 2 .. len/10) that
    repeats 10 or more times back-to-back at the very end, and collapse it
    down to a single occurrence. Shorter texts are returned unchanged.
    """
    total = len(text)
    if total < 8000:
        return text

    for size in range(2, total // 10 + 1):
        tail = text[-size:]
        # Count how many consecutive copies of `tail` terminate the text.
        repeats = 0
        pos = total - size
        while pos >= 0 and text[pos:pos + size] == tail:
            repeats += 1
            pos -= size

        if repeats >= 10:
            # Keep exactly one copy of the repeated unit.
            return text[:total - size * (repeats - 1)]

    return text


# -----------------------------------------------------------
# Inference (per the official docs: https://huggingface.co/tencent/HunyuanOCR)
# -----------------------------------------------------------
def run_ocr(
    image: Image.Image,
    prompt: str = "检测并识别图片中的文字，将文本坐标格式化输出。",
    max_tokens: int = 16384,
    temperature: float = 0.0,
    top_p: float = 0.001,
    stream: bool = False
):
    """
    Run OCR inference on one image.

    Args:
        image: RGB PIL image to recognize.
        prompt: Instruction text given to the model.
        max_tokens: Generation budget (max_new_tokens).
        temperature: >0 enables sampling; 0 means greedy decoding.
        top_p: Nucleus-sampling cutoff, only applied when sampling.
        stream: When True, returns a TextIteratorStreamer fed by a background
            thread; otherwise returns the decoded output string.

    Raises:
        ValueError: Model/processor not loaded, or `image` is not a PIL image.
    """
    global model, processor, device
    
    ensure_npu_context()

    
    if model is None or processor is None:
        raise ValueError("Model not loaded")
    
    # The processor expects a PIL image.
    if not isinstance(image, Image.Image):
        raise ValueError(f"Image must be PIL.Image, got {type(image)}")
    
    print(f"[OCR] Image size: {image.size}, mode: {image.mode}", flush=True)
    
    # Build the chat messages (strictly following the official docs).
    # Ref: https://huggingface.co/tencent/HunyuanOCR
    # NOTE: the `image` entry is only a placeholder path for the chat template;
    # the real image object is handed to the processor below.
    messages1 = [
        {"role": "system", "content": ""},
        {
            "role": "user",
            "content": [
                {"type": "image", "image": "image.jpg"},  # placeholder path
                {"type": "text", "text": prompt}
            ]
        }
    ]
    messages = [messages1]  # wrap as a batch of one conversation
    
    # Render the chat template (batched).
    texts = [
        processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
        for msg in messages
    ]
    
    # Build the model inputs (the images list is index-aligned with texts).
    inputs = processor(
        text=texts,
        images=[image],  # one image per text entry
        padding=True,
        return_tensors="pt"
    )
    
    # Move tensors to the target device.
    inputs = inputs.to(device)
    
    # Generation arguments (the official docs use do_sample=False).
    do_sample = temperature > 0
    generate_kwargs = {
        "max_new_tokens": max_tokens,
        "do_sample": do_sample,
    }
    if do_sample:
        generate_kwargs["temperature"] = temperature
        generate_kwargs["top_p"] = top_p
    
    if stream:
        from transformers import TextIteratorStreamer
        
        streamer = TextIteratorStreamer(
            processor.tokenizer,
            skip_prompt=True,
            skip_special_tokens=True
        )
        
        generate_kwargs["streamer"] = streamer
        
        def generate():
            # Bind the NPU context in this worker thread (every thread must).
            try:
                import torch_npu
                torch.npu.set_device(NPU_NAME)
            except Exception as e:
                if "Repeated initialization" not in str(e) and "100002" not in str(e):
                    print(f"[Generate Thread] Warning: {e}", flush=True)
            
            # Make sure the model is on the right device.
            model.to(device)
            
            with torch.no_grad():
                model.generate(**inputs, **generate_kwargs)
        
        thread = threading.Thread(target=generate)
        thread.start()
        
        return streamer
    else:
        with torch.no_grad():
            generated_ids = model.generate(**inputs, **generate_kwargs)
        
        # Locate the prompt token ids in the processed inputs.
        if "input_ids" in inputs:
            input_ids = inputs.input_ids
        else:
            input_ids = inputs.inputs
        
        # Strip the prompt, keeping only the newly generated tokens.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(input_ids, generated_ids)
        ]
        
        # Decode to text.
        output_text = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False
        )[0]
        
        # Collapse degenerate trailing repetitions.
        output_text = clean_repeated_substrings(output_text)
        
        return output_text


# -----------------------------------------------------------
# API 端点
# -----------------------------------------------------------

@app.get("/health")
async def health_check():
    """Liveness/readiness probe: reports model-load and device status."""
    device_name = str(device) if device else "unknown"
    return {
        "status": "ok",
        "model_loaded": model is not None,
        "device": device_name,
    }


@app.get("/v1/models")
async def list_models():
    """OpenAI-style model listing endpoint."""
    model_entry = {
        "id": "hunyuan-ocr",
        "object": "model",
        "created": int(time.time()),
        "owned_by": "tencent",
        "capabilities": ["ocr", "document-understanding", "multilingual"],
    }
    return {"object": "list", "data": [model_entry]}


@app.post("/v1/ocr")
async def ocr_endpoint(
    file: UploadFile = File(None, description="上传的图片文件"),
    image_base64: str = Form(None, description="Base64编码的图片"),
    image_url: str = Form(None, description="图片URL"),
    prompt: str = Form(default="检测并识别图片中的文字，将文本坐标格式化输出。", description="OCR提示语"),
    max_tokens: int = Form(default=16384, description="最大生成token数"),
    temperature: float = Form(default=0.0, description="采样温度(0表示不采样)"),
    top_p: float = Form(default=0.001, description="Top-p采样参数")
):
    """
    OCR endpoint - extract text from an image.

    Three ways to supply the image (checked in this order):
    1. File upload (file)
    2. Base64 string (image_base64)
    3. Image URL (image_url)

    **Examples:**
    ```bash
    # 1: file upload
    curl -X POST http://localhost:18004/v1/ocr \\
      -F "file=@document.jpg" \\
      -F "prompt=检测并识别图片中的文字"

    # 2: base64
    curl -X POST http://localhost:18004/v1/ocr \\
      -F "image_base64=<base64_string>" \\
      -F "prompt=识别图片中的文字"

    # 3: URL
    curl -X POST http://localhost:18004/v1/ocr \\
      -F "image_url=https://example.com/image.jpg" \\
      -F "prompt=检测并识别图片中的文字"
    ```
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    image = None
    try:
        if file:
            content = await file.read()
            image = Image.open(io.BytesIO(content)).convert('RGB')
        elif image_base64:
            image = decode_base64_image(image_base64)
        elif image_url:
            image = load_image_from_url(image_url)
        else:
            raise HTTPException(
                status_code=400,
                detail="Must provide one of: file, image_base64, or image_url"
            )
    except HTTPException:
        # FIX: the 400 raised just above used to be swallowed by the generic
        # handler below and re-wrapped as "Failed to load image: 400: ...",
        # garbling the error message. Re-raise it untouched instead.
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Failed to load image: {str(e)}")

    start_time = time.time()

    try:
        result = run_ocr(
            image=image,
            prompt=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=False
        )

        processing_time = time.time() - start_time

        return {
            "status": "success",
            "text": result,
            "processing_time": round(processing_time, 2)
        }

    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    """
    OpenAI-compatible chat completions endpoint.

    Accepts multimodal input (image + text); images may be supplied as:
    1. Base64 data URI: `data:image/jpeg;base64,...`
    2. HTTP/HTTPS URL: `https://example.com/image.jpg`

    **Examples:**
    ```bash
    # Base64
    curl -X POST http://localhost:18004/v1/chat/completions \\
      -H "Content-Type: application/json" \\
      -d '{
        "model": "hunyuan-ocr",
        "messages": [{
          "role": "user",
          "content": [
            {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}},
            {"type": "text", "text": "识别图片中的文字"}
          ]
        }]
      }'

    # URL
    curl -X POST http://localhost:18004/v1/chat/completions \\
      -H "Content-Type: application/json" \\
      -d '{
        "model": "hunyuan-ocr",
        "messages": [{
          "role": "user",
          "content": [
            {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
            {"type": "text", "text": "识别图片中的文字"}
          ]
        }]
      }'
    ```
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")
    
    # Extract the prompt text and image(s) from the user messages.
    # NOTE(review): when several text parts are present the last one wins,
    # and only the FIRST image is passed to the model; extra images are
    # fetched but unused — confirm intended.
    images = []
    text_prompt = ""
    
    for msg in request.messages:
        if msg.role == "user":
            if isinstance(msg.content, str):
                text_prompt = msg.content
            elif isinstance(msg.content, list):
                for item in msg.content:
                    if item.get("type") == "text":
                        text_prompt = item.get("text", "")
                    elif item.get("type") == "image_url":
                        url = item.get("image_url", {}).get("url", "")
                        # NOTE(review): decode/download errors raised here
                        # surface as an unhandled 500, not a 400 — confirm.
                        if url.startswith("data:image"):
                            images.append(decode_base64_image(url))
                        elif url:
                            images.append(load_image_from_url(url))
    
    if not images:
        raise HTTPException(status_code=400, detail="No image provided")
    
    # Fall back to the default OCR instruction when no text was supplied.
    if not text_prompt:
        text_prompt = "检测并识别图片中的文字，将文本坐标格式化输出。"
    
    if request.stream:
        # Server-sent events: forward chunks as they come off the streamer.
        async def generate_stream():
            ensure_npu_context()
            try:
                streamer = run_ocr(
                    image=images[0],
                    prompt=text_prompt,
                    max_tokens=request.max_tokens,
                    temperature=request.temperature,
                    top_p=request.top_p,
                    stream=True
                )
                
                # One OpenAI-style chunk per decoded text fragment.
                for new_text in streamer:
                    chunk = {
                        "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "model": "hunyuan-ocr",
                        "choices": [{
                            "index": 0,
                            "delta": {"content": new_text},
                            "finish_reason": None
                        }]
                    }
                    yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
                
                # Terminal chunk with finish_reason="stop", then the DONE marker.
                end_chunk = {
                    "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "model": "hunyuan-ocr",
                    "choices": [{
                        "index": 0,
                        "delta": {},
                        "finish_reason": "stop"
                    }]
                }
                yield f"data: {json.dumps(end_chunk, ensure_ascii=False)}\n\n"
                yield "data: [DONE]\n\n"
                
            except Exception as e:
                print(f"[Stream Error] {e}")
                yield f"data: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n"
        
        return StreamingResponse(
            generate_stream(),
            media_type="text/event-stream"
        )
    
    try:
        ensure_npu_context()
        
        result = run_ocr(
            image=images[0],
            prompt=text_prompt,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
            stream=False
        )
        
        # Non-streaming OpenAI-style response (token usage is not tracked).
        return {
            "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "hunyuan-ocr",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": result
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": -1,
                "completion_tokens": -1,
                "total_tokens": -1
            }
        }
        
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/v1/ocr/batch")
async def batch_ocr(
    files: List[UploadFile] = File(..., description="批量上传的图片文件"),
    prompt: str = Form(default="检测并识别图片中的文字，将文本坐标格式化输出。", description="OCR提示语"),
    max_tokens: int = Form(default=16384, description="最大生成token数")
):
    """Batch OCR endpoint: runs OCR sequentially over every uploaded file."""
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    batch_started = time.time()
    results = []

    for index, upload in enumerate(files):
        try:
            payload = await upload.read()
            pil_image = Image.open(io.BytesIO(payload)).convert('RGB')

            item_started = time.time()
            text = run_ocr(
                image=pil_image,
                prompt=prompt,
                max_tokens=max_tokens,
                stream=False
            )

            results.append({
                "index": index,
                "filename": upload.filename,
                "status": "success",
                "text": text,
                "processing_time": round(time.time() - item_started, 2)
            })

        except Exception as e:
            # A failure on one file must not abort the rest of the batch.
            results.append({
                "index": index,
                "filename": upload.filename,
                "status": "error",
                "error": str(e)
            })

    succeeded = [r for r in results if r["status"] == "success"]
    failed = [r for r in results if r["status"] == "error"]

    return {
        "status": "completed",
        "total_files": len(files),
        "successful": len(succeeded),
        "failed": len(failed),
        "total_processing_time": round(time.time() - batch_started, 2),
        "results": results
    }


# -----------------------------------------------------------
# 运行
# -----------------------------------------------------------
if __name__ == "__main__":
    # The listen port is configurable through the PORT environment variable.
    listen_port = int(os.getenv("PORT", "8000"))
    uvicorn.run(app, host="0.0.0.0", port=listen_port)
