# multimodal_api_server.py
# Set environment variables before importing other libraries (keeps tokenizer memory/thread usage low)
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"
from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks, Request
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import torch
from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
from PIL import Image
import json
import re
import uuid
import asyncio
from pdf2image import convert_from_path
import logging
from typing import Dict, List, Optional
import tempfile
from contextlib import asynccontextmanager
import uvicorn

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

tokenizer = None
global_model = None


def torch_gc():
    """Release cached CUDA memory; a no-op on CPU-only hosts."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: load the model at startup, free it at shutdown.

    Bug fix: the success message was previously printed unconditionally,
    even when loading raised or load_model() returned None.
    """
    global global_model
    try:
        global_model = extractor.load_model()
    except Exception as e:
        print(f"❌ 加载失败: {e}")
        import traceback
        traceback.print_exc()
    # load_model() returns None on internal failure, so check the result
    # before announcing success.
    if global_model is not None:
        print("模型加载完成!")
    yield
    # Shutdown: drop the model reference and flush CUDA caches.
    if global_model is not None:
        del global_model
    torch_gc()


# Application instance; lifespan handles model load/unload.
app = FastAPI(
    title="多模态提单信息提取API",
    description="基于Qwen3-VL-8B-Instruct的多模态提单文档信息提取服务",
    version="1.0.0",
    lifespan=lifespan
)

# CORS configuration — currently wide open.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # NOTE(review): restrict to known origins in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# 请求和响应模型
class ExtractionRequest(BaseModel):
    """Options accompanying a single-file extraction request."""
    file_type: str = "pdf"  # "pdf" or "image"
    return_images: bool = False  # whether to return processed page images — NOTE(review): not consumed by /extract; confirm intent


class ExtractionResponse(BaseModel):
    """Result envelope for a single extraction request."""
    request_id: str  # server-generated UUID for tracing
    status: str  # "success" or "error"
    result: Optional[Dict] = None  # merged field dict on success
    error: Optional[str] = None  # error message on failure
    processing_time: Optional[float] = None  # seconds, rounded to 2 decimals
    pages_processed: Optional[int] = None  # PDF page count, or 1 for images


class BatchExtractionRequest(BaseModel):
    """Payload for the batch endpoint: URLs to fetch and their shared type."""
    file_urls: List[str]  # remote files to process
    file_type: str = "pdf"  # applies to every URL in the batch


class BatchExtractionResponse(BaseModel):
    """Result envelope for a batch extraction request."""
    request_id: str  # server-generated UUID for tracing
    status: str  # e.g. "processing"
    results: List[Dict]  # per-file result dicts
    total_files: int
    successful_files: int
    failed_files: int


# Extractor class definition; the module-level instance is created further below.
class MultimodalExtractor:
    """Qwen3-VL based field extractor for bill-of-lading documents.

    Owns the model/processor pair. Call load_model() once, then use
    extract_from_pdf() (page-by-page, results merged) or
    extract_from_image() (single image).
    """

    def __init__(self, model_path: str, work_dir: str = "/mnt/d/project/llt-model"):
        # Local checkpoint directory (FP8 weights expected; FP16 fallback).
        self.model_path = model_path
        # Base directory for per-page JPEG renders of PDFs (was hard-coded
        # inside extract_from_pdf; same default preserved for compatibility).
        self.work_dir = work_dir
        self.model = None
        self.processor = None
        self.is_loaded = False

    def load_model(self):
        """Load the model (FP8 first, FP16 fallback) and its processor.

        Returns:
            The loaded model, or None when both weight loads or the
            processor load fail. `self.is_loaded` reflects the outcome.
        """
        try:
            logger.info(f"正在加载FP8多模态模型: {self.model_path}")
            self.model = Qwen3VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.float8_e4m3fn,  # FP8 weights to cut VRAM usage
                device_map="auto",
                attn_implementation="flash_attention_2",
                use_safetensors=True,
                local_files_only=True,
                trust_remote_code=True
            )
            logger.info("FP8模型加载成功!")
        except Exception as e:
            logger.error(f"FP8模型加载失败: {e}")
            # FP8 may be unsupported on this hardware/driver — retry in FP16.
            try:
                logger.info("尝试使用FP16加载模型...")
                self.model = Qwen3VLForConditionalGeneration.from_pretrained(
                    self.model_path,
                    torch_dtype=torch.float16,
                    device_map="auto",
                    attn_implementation="flash_attention_2",
                    use_safetensors=True,
                    local_files_only=True,
                    trust_remote_code=True
                )
                logger.info("FP16模型加载成功!")
            except Exception as e2:
                logger.error(f"FP16模型加载也失败: {e2}")
                self.is_loaded = False
                return None

        try:
            logger.info("正在加载processor...")
            self.processor = AutoProcessor.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )
            self.is_loaded = True
            logger.info("Processor加载成功!")
            return self.model
        except Exception as e:
            logger.error(f"Processor加载失败: {e}")
            self.is_loaded = False
            return None

    def _generate_response(self, image, prompt: str) -> str:
        """Run one image + prompt through the model; return decoded text.

        Shared by the PDF and single-image paths (previously duplicated).
        Frees per-call tensors before returning to limit VRAM growth.
        """
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": prompt}
                ]
            }
        ]

        text = self.processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        inputs = self.processor(
            text=[text],
            images=[image],
            padding=True,
            return_tensors="pt"
        )

        # Move tensors to wherever device_map placed the model.
        device = next(self.model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=1024,
                do_sample=False,  # greedy decoding for reproducible extraction
                temperature=0.1,
                top_p=0.9,
                pad_token_id=self.processor.tokenizer.eos_token_id,
                repetition_penalty=1.1  # discourage repeated spans in long outputs
            )

        response = self.processor.decode(outputs[0], skip_special_tokens=True)

        # Release per-call tensors promptly (no-op on CPU-only hosts).
        del inputs, outputs
        torch.cuda.empty_cache()

        return response

    def extract_from_pdf(self, pdf_path: str) -> Dict:
        """Rasterize a PDF, run the model per page, and merge the fields.

        Raises:
            RuntimeError: if load_model() has not succeeded.
        """
        print(f"extract_from_pdf: {pdf_path}")
        if not self.is_loaded:
            raise RuntimeError("模型未加载")

        try:
            # 150 DPI balances legibility of small table text against memory.
            images = convert_from_path(pdf_path, dpi=150)
            results = {}
            llm_dir = os.path.join(self.work_dir, "tmp")
            os.makedirs(llm_dir, exist_ok=True)
            pdf_filename = os.path.splitext(os.path.basename(pdf_path))[0]

            # Prompt is page-invariant; build it once outside the loop.
            # NOTE(review): it embeds raw <|im_start|> chat markers and is then
            # wrapped again by apply_chat_template in _generate_response —
            # likely redundant; confirm the intended template usage.
            prompt = """<|im_start|>user
<image>
请仔细分析这张提单图片，提取以下字段信息：

1. 集装箱号 (Container No.)
2. 发货人公司名称 (Shipper/Exporter Company)
3. 发货人地址 (Shipper/Exporter Address)  地址如果包含邮政编码，请带上
4. 收货人公司名称 (Consignee Company)
5. 收货人地址 (Consignee Address) 地址如果包含邮政编码，请带上
6. 包裹数量 (packages)
7. 重量 (weight)
8. 体积 (volume)
9. 货品名称 (goods)  数组类型，一个货品名称为一条数据

请严格按照如下JSON格式返回数据，如果字段不存在请使用空字符串""：
{
"Container No.": "提取到的集装箱号",
"Shipper/Exporter[Company]": "发货公司名称", 
"Shipper/Exporter[Address]": "发货人地址",
"Consignee[Company]": "收货公司名称",
"Consignee[Address]": "收货人地址",
"goods": ["货品1","货品2"],
"packages": "包裹数量数字",
"weight": "重量数字", 
"volume": "体积数字"
}

特别注意：
- 货品名称(goods)字段必须是数组类型
- 如果货品描述中包含换行分隔的货品，请将它们分开作为数组的独立元素
- 例如："FAN \n AUDIO" 应该分割为 ["FAN", "AUDIO"]
- 请仔细扫描DESCRIPTION OF PACKAGES AND GOODS部分的每个货品名称

请仔细扫描文档的每个部分，特别是表格区域

请仔细分析需求，仔细分析推敲文档后给出结果。<|im_end|>
<|im_start|>assistant
"""

            for page_num, image in enumerate(images):
                logger.info(f"处理第 {page_num + 1} 页...")
                image_filename = f"{pdf_filename}_page_{page_num + 1}.jpg"
                image_path = os.path.join(llm_dir, image_filename)
                image.save(image_path, "JPEG", quality=85)

                logger.info(f"图片已保存到: {image_path}")

                image_pil = Image.open(image_path).convert('RGB')

                try:
                    response = self._generate_response(image_pil, prompt)
                    print(f"原始响应: {response}")

                    page_result = self.extract_json_from_string(response)
                    print(f"page_{page_num + 1}： {page_result}")

                    # Bug fix: extract_json_from_string may return None for an
                    # unparseable response; store an empty template instead so
                    # _merge_page_results never sees None (previously crashed
                    # on None.get).
                    results[f"page_{page_num + 1}"] = page_result or self._get_empty_result()

                except Exception as e:
                    logger.error(f"第 {page_num + 1} 页处理失败: {e}")
                    results[f"page_{page_num + 1}"] = self._get_empty_result()
                    continue

            # Collapse per-page dicts into a single result.
            return self._merge_page_results(results)

        except Exception as e:
            logger.error(f"PDF提取失败: {e}")
            raise

    def extract_from_image(self, image_path: str) -> Dict:
        """Extract bill-of-lading fields from a single image file.

        Raises:
            RuntimeError: if load_model() has not succeeded.
        """
        if not self.is_loaded:
            raise RuntimeError("模型未加载")

        try:
            image = Image.open(image_path).convert('RGB')

            prompt = """<|im_start|>user
<image>
请仔细分析这张提单图片，提取以下字段信息：

1. 集装箱号 (Container No.)
2. 发货人公司名称 (Shipper/Exporter Company)
3. 发货人地址 (Shipper/Exporter Address)  
4. 收货人公司名称 (Consignee Company)
5. 收货人地址 (Consignee Address)
6. 包裹数量 (packages)
7. 重量 (weight)
8. 体积 (volume)

请返回准确的JSON格式数据，如果字段不存在请使用空字符串""。<|im_end|>
<|im_start|>assistant
"""

            response = self._generate_response(image, prompt)
            print(f"图片提取响应: {response}")

            # Keep the contract dict-shaped even when parsing fails
            # (previously this could return None).
            result = self.extract_json_from_string(response)
            return result or self._get_empty_result()

        except Exception as e:
            logger.error(f"图片提取失败: {e}")
            raise

    def extract_json_from_string(self, response: str):
        """Extract the first parseable JSON object from a model response.

        Returns:
            The parsed dict, or None when no valid JSON object is found.
        """
        # Prefer the content of a ```json fenced block. Bug fix: previously,
        # when the model did not emit a fence, the whole response was replaced
        # with "" and extraction always failed; now fall back to the raw text.
        fenced = self.extract_after_assistant_regex(response)
        candidate = fenced if fenced else response

        # Non-greedy brace match — does not handle nested objects, which is
        # acceptable for the flat schema requested in the prompt.
        for match in re.findall(r'\{.*?\}', candidate, re.DOTALL):
            try:
                return json.loads(match)
            except json.JSONDecodeError:
                # Not valid JSON — try the next brace-delimited span.
                continue

        return None

    def extract_after_assistant_regex(self, response: str):
        """Return the contents of a ```json fenced block, or "" if absent."""
        pattern = r'```json\s*(.*)```'
        match = re.search(pattern, response, re.DOTALL)
        if match:
            return match.group(1).strip()
        return ""

    def _extract_json_from_response(self, response: str) -> Dict:
        """Stricter JSON extraction with a text-scraping fallback.

        NOTE(review): not referenced anywhere in this file — confirm whether
        external callers exist before removing.
        """
        try:
            # Brace matcher that tolerates one level of nesting.
            json_match = re.search(r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', response, re.DOTALL)
            if json_match:
                json_str = json_match.group()
                # Strip control characters and literal escape sequences that
                # commonly break json.loads on raw model output.
                json_str = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', json_str)
                json_str = json_str.replace('\\n', '').replace('\\t', '')
                return json.loads(json_str)
        except Exception as e:
            logger.warning(f"JSON解析失败: {e}, 响应内容: {response}")
            return self._build_json_from_text(response)

        return self._get_empty_result()

    def _build_json_from_text(self, text: str) -> Dict:
        """Best-effort regex scrape of key fields from free-form text."""
        result = self._get_empty_result()

        patterns = {
            "Container No.": r'[A-Z]{3}[A-Z]?\d{6,7}',  # owner-code + serial shape
            "packages": r'packages?.*?(\d+)',
            "weight": r'weight.*?(\d+\.?\d*)',
            "volume": r'volume.*?(\d+\.?\d*)'
        }

        for key, pattern in patterns.items():
            match = re.search(pattern, text, re.IGNORECASE)
            if match:
                # The container pattern has no capture group; use the full match.
                result[key] = match.group(1) if match.groups() else match.group()

        return result

    def _merge_page_results(self, page_results: Dict) -> Dict:
        """Merge per-page dicts: first non-empty value per field wins."""
        merged = self._get_empty_result()

        for page_data in page_results.values():
            # Bug fix: skip pages whose extraction yielded None instead of a
            # dict — previously this raised AttributeError on .get.
            if not isinstance(page_data, dict):
                continue
            for key in merged:
                if page_data.get(key) and not merged[key]:
                    merged[key] = page_data[key]

        return merged

    def _get_empty_result(self) -> Dict:
        """Return a fresh result template with every field empty."""
        return {
            "Container No.": "",
            "Shipper/Exporter[Company]": "",
            "Shipper/Exporter[Address]": "",
            "Consignee[Company]": "",
            "Consignee[Address]": "",
            "goods": [],
            "packages": "",
            "weight": "",
            "volume": ""
        }


# Module-level extractor instance pointing at the local FP8 checkpoint.
# NOTE(review): machine-specific path — consider an env var for deployment.
extractor = MultimodalExtractor("/mnt/d/project/llt-model/Qwen3-VL-8B-Instruct-FP8")


# 内存管理
def cleanup_memory():
    """Run the garbage collector and drop cached CUDA allocations."""
    import gc

    gc.collect()
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()


@app.get("/")
async def root():
    """根路径"""
    return {
        "message": "多模态提单信息提取API服务 (FP8版本)",
        "status": "运行中",
        "model_loaded": extractor.is_loaded
    }


@app.get("/health")
async def health_check():
    """健康检查"""
    return {
        "status": "healthy" if extractor.is_loaded else "unhealthy",
        "model_loaded": extractor.is_loaded,
        "gpu_available": torch.cuda.is_available(),
        "model_precision": "FP8" if extractor.is_loaded else "unknown"
    }


# 其余API路由保持不变...
@app.post("/extract", response_model=ExtractionResponse)
async def extract_from_file(
        file: UploadFile = File(...),
        request: ExtractionRequest = ExtractionRequest()
):
    """从上传的文件中提取信息"""
    request_id = str(uuid.uuid4())
    start_time = asyncio.get_event_loop().time()

    try:
        # 验证文件类型
        if request.file_type == "pdf":
            if not file.filename.lower().endswith('.pdf'):
                raise HTTPException(status_code=400, detail="文件必须是PDF格式")
        else:
            allowed_image_types = ['.jpg', '.jpeg', '.png', '.bmp']
            if not any(file.filename.lower().endswith(ext) for ext in allowed_image_types):
                raise HTTPException(status_code=400, detail="文件必须是图片格式")

        # 保存临时文件
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as tmp_file:
            content = await file.read()
            tmp_file.write(content)
            tmp_path = tmp_file.name

        try:
            # 提取信息
            if request.file_type == "pdf":
                result = extractor.extract_from_pdf(tmp_path)
                pages_processed = len(convert_from_path(tmp_path))
            else:
                result = extractor.extract_from_image(tmp_path)
                pages_processed = 1

            processing_time = asyncio.get_event_loop().time() - start_time

            print(f"result: {result}")
            # 构建响应
            response = ExtractionResponse(
                request_id=request_id,
                status="success",
                result=result,
                processing_time=round(processing_time, 2),
                pages_processed=pages_processed
            )

            return response

        finally:
            # 清理临时文件
            os.unlink(tmp_path)
            cleanup_memory()

    except Exception as e:
        logger.error(f"提取失败: {e}")
        processing_time = asyncio.get_event_loop().time() - start_time

        response = ExtractionResponse(
            request_id=request_id,
            status="error",
            error=str(e),
            processing_time=round(processing_time, 2)
        )
        return response


# 其余代码保持不变...
@app.post("/extract/batch", response_model=BatchExtractionResponse)
async def batch_extract(request: BatchExtractionRequest):
    """批量提取信息"""
    request_id = str(uuid.uuid4())

    return BatchExtractionResponse(
        request_id=request_id,
        status="processing",
        results=[],
        total_files=len(request.file_urls),
        successful_files=0,
        failed_files=0
    )


@app.post("/extract/url")
async def extract_from_url(file_url: str, file_type: str = "pdf"):
    """从URL提取信息"""
    request_id = str(uuid.uuid4())
    start_time = asyncio.get_event_loop().time()

    try:
        import requests
        import tempfile

        # 下载文件
        response = requests.get(file_url, stream=True)
        response.raise_for_status()

        # 保存临时文件
        file_extension = ".pdf" if file_type == "pdf" else ".jpg"
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as tmp_file:
            for chunk in response.iter_content(chunk_size=8192):
                tmp_file.write(chunk)
            tmp_path = tmp_file.name

        try:
            # 提取信息
            if file_type == "pdf":
                result = extractor.extract_from_pdf(tmp_path)
                pages_processed = len(convert_from_path(tmp_path))
            else:
                result = extractor.extract_from_image(tmp_path)
                pages_processed = 1

            processing_time = asyncio.get_event_loop().time() - start_time

            return ExtractionResponse(
                request_id=request_id,
                status="success",
                result=result,
                processing_time=round(processing_time, 2),
                pages_processed=pages_processed
            )

        finally:
            os.unlink(tmp_path)
            cleanup_memory()

    except Exception as e:
        logger.error(f"URL提取失败: {e}")
        processing_time = asyncio.get_event_loop().time() - start_time

        return ExtractionResponse(
            request_id=request_id,
            status="error",
            error=str(e),
            processing_time=round(processing_time, 2)
        )


@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    try:
        data = await request.json()
        messages = data.get("messages", [])

        if not extractor.is_loaded or extractor.processor is None:
            return JSONResponse({"error": "Model not loaded"}, status_code=503)

        # 构建 Qwen3-VL 的对话格式
        qwen_messages = []
        for msg in messages:
            role = msg.get("role")
            content = msg.get("content", "")

            if role == "user":
                qwen_messages.append({
                    "role": "user",
                    "content": [
                        {"type": "text", "text": content}
                    ]
                })
            elif role == "assistant":
                qwen_messages.append({
                    "role": "assistant",
                    "content": [
                        {"type": "text", "text": content}
                    ]
                })
            elif role == "system":
                qwen_messages.append({
                    "role": "user",
                    "content": [
                        {"type": "text", "text": content}
                    ]
                })

        if not qwen_messages:
            return JSONResponse({"error": "No valid messages found"}, status_code=400)

        # 应用聊天模板
        text = extractor.processor.apply_chat_template(
            qwen_messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # 准备模型输入（仅文本）
        inputs = extractor.processor(
            text=[text],
            padding=True,
            return_tensors="pt"
        )

        # 将输入数据移动到模型所在的设备
        device = next(extractor.model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # 生成响应
        with torch.no_grad():
            outputs = extractor.model.generate(
                **inputs,
                max_new_tokens=1024,
                do_sample=False,
                temperature=0.1,
                top_p=0.9,
                pad_token_id=extractor.processor.tokenizer.eos_token_id
            )

        # 解码响应
        response_text = extractor.processor.decode(outputs[0], skip_special_tokens=True)

        print(f"response: {response_text}")

        # 提取助手回复（去掉输入部分）
        if "assistant" in response_text:
            response_parts = response_text.split("assistant")
            if len(response_parts) > 1:
                response_text = response_parts[-1].strip()

        # 清理内存
        del inputs, outputs
        torch_gc()

        # 构建 OpenAI 兼容的响应
        return JSONResponse({
            "choices": [{
                "message": {
                    "role": "assistant",
                    "content": response_text,
                }
            }]
        })

    except Exception as e:
        logger.error(f"Chat completion error: {e}")
        return JSONResponse({"error": str(e)}, status_code=500)


# 错误处理
@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    """Convert HTTPException into the service's uniform error envelope."""
    body = {
        "request_id": str(uuid.uuid4()),
        "status": "error",
        "error": exc.detail,
    }
    return JSONResponse(status_code=exc.status_code, content=body)


@app.exception_handler(Exception)
async def general_exception_handler(request, exc):
    """Last-resort handler: log the exception, hide details from clients."""
    logger.error(f"未处理的异常: {exc}")
    body = {
        "request_id": str(uuid.uuid4()),
        "status": "error",
        "error": "内部服务器错误",
    }
    return JSONResponse(status_code=500, content=body)


if __name__ == '__main__':
    # NOTE(review): uvicorn ignores `workers` when given an app object via
    # Config/Server (multiple workers require an import string); confirm
    # whether multi-worker serving was intended — each worker would also load
    # its own copy of the model.
    config = uvicorn.Config(app, host="0.0.0.0", port=8000, workers=5)
    server = uvicorn.Server(config)

    try:
        asyncio.run(server.serve())
    except RuntimeError:
        # Fallback for environments with an already-running event loop
        # (e.g. notebooks): nest_asyncio patches asyncio to allow re-entry.
        import nest_asyncio

        nest_asyncio.apply()
        asyncio.run(server.serve())