# multimodal_api_server.py
import asyncio
import json
import logging
import os
import re
import tempfile
import uuid
from contextlib import asynccontextmanager
from typing import Dict, List, Optional

import torch
import uvicorn
from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pdf2image import convert_from_path, pdfinfo_from_path
from PIL import Image
from pydantic import BaseModel
from transformers import Qwen3VLForConditionalGeneration, AutoProcessor

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


tokenizer = None
global_model = None



def torch_gc():
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup  可用

    global global_model
    try:
        global_model = extractor.load_model()
    except Exception as e:
        print(f"❌ 加载失败: {e}")
        import traceback
        traceback.print_exc()
    print("模型加载完成!")
    yield
    # Shutdown
    if global_model is not None:
        del global_model
    torch_gc()


app = FastAPI(
    title="多模态提单信息提取API",
    description="基于Qwen3-VL-8B-Instruct的多模态提单文档信息提取服务",
    version="1.0.0",
    lifespan=lifespan
)
# CORS配置
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # 生产环境应该限制域名
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# 请求和响应模型
class ExtractionRequest(BaseModel):
    """提取请求模型"""
    file_type: str = "pdf"  # pdf or image
    return_images: bool = False  # 是否返回处理后的图片


class ExtractionResponse(BaseModel):
    """提取响应模型"""
    request_id: str
    status: str
    result: Optional[Dict] = None
    error: Optional[str] = None
    processing_time: Optional[float] = None
    pages_processed: Optional[int] = None


class BatchExtractionRequest(BaseModel):
    """批量提取请求模型"""
    file_urls: List[str]
    file_type: str = "pdf"


class BatchExtractionResponse(BaseModel):
    """批量提取响应模型"""
    request_id: str
    status: str
    results: List[Dict]
    total_files: int
    successful_files: int
    failed_files: int


# 全局模型实例
class MultimodalExtractor:
    def __init__(self, model_path: str):
        self.model_path = model_path
        self.model = None
        self.processor = None
        self.is_loaded = False

    def load_model(self):
        """加载模型"""
        try:
            logger.info(f"正在加载多模态模型: {self.model_path}")
            self.model = Qwen3VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
                device_map="auto",
                # low_cpu_mem_usage=True,
                attn_implementation="flash_attention_2",
                use_safetensors=True,
                local_files_only=True,
                trust_remote_code=True
            )

            # self.processor = LlavaProcessor.from_pretrained(self.model_path)
            # self.is_loaded = True
            logger.info("模型加载成功!")
            # return self.model
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            self.is_loaded = False
            return None

        try:
            logger.info("正在加载多模态模型...processor")
            self.processor = AutoProcessor.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )
            self.is_loaded = True
            logger.info("模型加载成功!")
            return self.model
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            self.is_loaded = False
            return None

    def extract_from_pdf(self, pdf_path: str) -> Dict:
        print(f"extract_from_pdf: {pdf_path}")
        """从PDF提取信息"""
        if not self.is_loaded:
            raise RuntimeError("模型未加载")

        try:
            # 转换PDF为图片
            images = convert_from_path(pdf_path, dpi=150)  # 降低DPI提高处理速度
            results = {}
            temp_dir = "/mnt/d/project/llt-model"
            llm_dir = os.path.join(temp_dir, "tmp")
            os.makedirs(llm_dir, exist_ok=True)
            pdf_filename = os.path.splitext(os.path.basename(pdf_path))[0]

            for page_num, image in enumerate(images):
                logger.info(f"处理第 {page_num + 1} 页...")
                image_filename = f"{pdf_filename}_page_{page_num + 1}.jpg"
                image_path = os.path.join(llm_dir, image_filename)
                image.save(image_path, "JPEG", quality=85)

                logger.info(f"图片已保存到: {image_path}")

                # 更具体、分步骤的prompt
                prompt = """<|im_start|>user
<image>
请仔细分析这张提单图片，提取以下字段信息：

1. 集装箱号 (Container No.)
2. 发货人公司名称 (Shipper/Exporter Company)
3. 发货人地址 (Shipper/Exporter Address)  地址如果包含邮政编码，请带上
4. 收货人公司名称 (Consignee Company)
5. 收货人地址 (Consignee Address) 地址如果包含邮政编码，请带上
6. 包裹数量 (packages)
7. 重量 (weight)
8. 体积 (volume)
9. 货品名称 (goods) 数组类型

请严格按照如下JSON格式返回数据，如果字段不存在请使用空字符串""：
{
"Container No.": "提取到的集装箱号",
"Shipper/Exporter[Company]": "发货公司名称", 
"Shipper/Exporter[Address]": "发货人地址",
"Consignee[Company]": "收货公司名称",
"Consignee[Address]": "收货人地址",
"goods": "货品名称",
"packages": "包裹数量数字",
"weight": "重量数字", 
"volume": "体积数字"

}

请仔细扫描文档的每个部分，特别是表格区域。<|im_end|>
<|im_start|>assistant
"""
                image_pil = Image.open(image_path).convert('RGB')  # 确保是RGB格式

                # 准备输入
                # inputs = self.processor(
                #     text=prompt,
                #     images=image,
                #     return_tensors="pt",
                #     padding=True
                # )
                # 使用聊天格式（LLaVA推荐方式）
                try:
                    # 使用Qwen3-VL的正确输入格式
                    messages = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image", "image": image_pil},
                                {"type": "text", "text": prompt}
                            ]
                        }
                    ]

                    # 使用processor处理多模态输入
                    text = self.processor.apply_chat_template(
                        messages,
                        tokenize=False,
                        add_generation_prompt=True
                    )

                    # 准备模型输入
                    inputs = self.processor(
                        text=[text],
                        images=[image_pil],
                        padding=True,
                        return_tensors="pt"
                    )

                    # 将输入数据移动到模型所在的设备
                    device = next(self.model.parameters()).device
                    inputs = {k: v.to(device) for k, v in inputs.items()}

                    # 生成响应 - 使用Qwen3-VL推荐的生成参数
                    with torch.no_grad():
                        outputs = self.model.generate(
                            **inputs,
                            max_new_tokens=1024,
                            do_sample=False,
                            temperature=0.1,
                            top_p=0.9,
                            pad_token_id=self.processor.tokenizer.eos_token_id
                        )

                    # 解码响应
                    response = self.processor.decode(outputs[0], skip_special_tokens=True)
                    print(f"原始响应: {response}")

                    # 提取JSON部分
                    page_result = self.extract_json_from_string(response)
                    print(f"page_{page_num + 1}： {page_result}")

                    results[f"page_{page_num + 1}"] = page_result

                    # 清理内存
                    del inputs, outputs
                    torch.cuda.empty_cache()

                except Exception as e:
                    logger.error(f"第 {page_num + 1} 页处理失败: {e}")
                    results[f"page_{page_num + 1}"] = self._get_empty_result()
                    continue

            # 合并多页结果
            final_result = self._merge_page_results(results)
            return final_result

        except Exception as e:
            logger.error(f"PDF提取失败: {e}")
            raise

    def preprocess_image_for_better_ocr(self, image_path: str) -> str:
        """图像预处理以提高可读性"""
        try:
            import cv2
            import numpy as np

            # 读取图像
            image = cv2.imread(image_path)

            # 调整大小保持可读性
            height, width = image.shape[:2]
            if width > 2000:
                scale = 2000 / width
                new_width = 2000
                new_height = int(height * scale)
                image = cv2.resize(image, (new_width, new_height))

            # 多种预处理尝试
            processed_images = []

            # 1. 灰度图
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            processed_images.append(('gray', gray))

            # 2. 自适应阈值
            adaptive_thresh = cv2.adaptiveThreshold(
                gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                cv2.THRESH_BINARY, 11, 2
            )
            processed_images.append(('adaptive', adaptive_thresh))

            # 3. 锐化
            kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
            sharpened = cv2.filter2D(gray, -1, kernel)
            processed_images.append(('sharpened', sharpened))

            # 测试每种预处理的效果
            best_text = ""
            best_image_path = image_path

            for name, proc_image in processed_images:
                try:
                    # 保存预处理后的图像
                    temp_path = image_path.replace('.jpg', f'_{name}.jpg')
                    cv2.imwrite(temp_path, proc_image)

                    # OCR测试
                    text = pytesseract.image_to_string(proc_image, config='--oem 3 --psm 6')
                    if len(text) > len(best_text):
                        best_text = text
                        best_image_path = temp_path

                except Exception as e:
                    logger.warning(f"预处理 {name} 失败: {e}")
                    continue

            logger.info(f"最佳OCR文本长度: {len(best_text)}")
            return best_image_path

        except Exception as e:
            logger.error(f"图像预处理失败: {e}")
            return image_path


    def debug_image_processing(self, image_path: str):
        """调试图像处理过程"""
        image = Image.open(image_path).convert('RGB')

        # 测试不同的prompt格式
        test_prompts = [
            "<image>\n提取这张图片的文字",
            "<image>\n这张图片是什么？",
            "<image>\n描述图片内容。",
            "这张图片是什么？<image>",
        ]

        for i, prompt in enumerate(test_prompts):
            logger.info(f"测试prompt {i + 1}: {prompt}")

            inputs = self.processor(
                text=prompt,
                images=image,
                return_tensors="pt",
                padding=True
            )

            logger.info(f"输入键: {list(inputs.keys())}")
            if 'pixel_values' in inputs:
                logger.info(f"像素值形状: {inputs['pixel_values'].shape}")

            device = next(self.model.parameters()).device
            inputs = {k: v.to(device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=100,
                    do_sample=False
                )

            response = self.processor.decode(outputs[0], skip_special_tokens=True)
            logger.info(f"响应 {i + 1}: {response}")

            del inputs, outputs
            torch.cuda.empty_cache()

    def extract_from_image(self, image_path: str) -> Dict:
        """从图片提取信息"""
        if not self.is_loaded:
            raise RuntimeError("模型未加载")

        try:
            image = Image.open(image_path).convert('RGB')

            prompt = """<|im_start|>user
<image>
请仔细分析这张提单图片，提取以下字段信息：

1. 集装箱号 (Container No.)
2. 发货人公司名称 (Shipper/Exporter Company)
3. 发货人地址 (Shipper/Exporter Address)  
4. 收货人公司名称 (Consignee Company)
5. 收货人地址 (Consignee Address)
6. 包裹数量 (packages)
7. 重量 (weight)
8. 体积 (volume)

请返回准确的JSON格式数据，如果字段不存在请使用空字符串""。<|im_end|>
<|im_start|>assistant
"""

            # 使用正确的输入格式
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "image", "image": image},
                        {"type": "text", "text": prompt}
                    ]
                }
            ]

            text = self.processor.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )

            inputs = self.processor(
                text=[text],
                images=[image],
                padding=True,
                return_tensors="pt"
            )

            # 将输入数据移动到模型所在的设备
            device = next(self.model.parameters()).device
            inputs = {k: v.to(device) for k, v in inputs.items()}

            # 生成响应
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=1024,
                    do_sample=False,
                    temperature=0.1,
                    pad_token_id=self.processor.tokenizer.eos_token_id
                )

            # 解码响应
            response = self.processor.decode(outputs[0], skip_special_tokens=True)
            print(f"图片提取响应: {response}")

            # 提取JSON部分
            result = self.extract_json_from_string(response)
            return result

        except Exception as e:
            logger.error(f"图片提取失败: {e}")
            raise


    def extract_json_from_string(self, response: str):
        response= self.extract_after_assistant_regex(response)
        """
        从字符串中提取JSON内容
        """
        # 使用正则表达式匹配JSON格式的内容
        json_pattern = r'\{.*?\}'
        matches = re.findall(json_pattern, response, re.DOTALL)

        for match in matches:
            try:
                # 尝试解析为JSON
                json_data = json.loads(match)
                return json_data
            except json.JSONDecodeError:
                # 如果解析失败，继续尝试下一个匹配
                continue

        return None


    def extract_after_assistant_regex(self, response: str):
        """
        使用正则表达式匹配assistant后面的内容
        """
        pattern = r'```json\s*(.*)```'  # 匹配assistant后面的所有内容
        match = re.search(pattern, response, re.DOTALL)  # re.DOTALL让.匹配换行符
        if match:
            return match.group(1).strip()
        return ""

    def _extract_json_from_response(self, response: str) -> Dict:
        """从响应中提取JSON"""
        try:
            # 查找JSON部分 - 改进的匹配模式
            json_match = re.search(r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', response, re.DOTALL)
            if json_match:
                json_str = json_match.group()
                # 清理JSON字符串
                json_str = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', json_str)
                json_str = json_str.replace('\\n', '').replace('\\t', '')
                return json.loads(json_str)
        except Exception as e:
            logger.warning(f"JSON解析失败: {e}, 响应内容: {response}")

            # 尝试手动构建JSON
            return self._build_json_from_text(response)

        # 如果JSON解析失败，返回空结果
        return self._get_empty_result()

    def _build_json_from_text(self, text: str) -> Dict:
        """从文本中手动构建JSON"""
        result = self._get_empty_result()

        # 简单的关键词匹配
        patterns = {
            "Container No.": r'[A-Z]{3}[A-Z]?\d{6,7}',
            "packages": r'packages?.*?(\d+)',
            "weight": r'weight.*?(\d+\.?\d*)',
            "volume": r'volume.*?(\d+\.?\d*)'
        }

        for key, pattern in patterns.items():
            match = re.search(pattern, text, re.IGNORECASE)
            if match:
                result[key] = match.group(1) if match.groups() else match.group()

        return result

    def _merge_page_results(self, page_results: Dict) -> Dict:
        """合并多页结果"""
        merged = self._get_empty_result()

        # 优先使用非空值
        for page_data in page_results.values():
            for key in merged:
                if page_data.get(key) and not merged[key]:
                    merged[key] = page_data[key]

        return merged

    def _get_empty_result(self) -> Dict:
        """获取空结果模板"""
        return {
            # "MBL/HBL No.": "",
            "Container No.": "",
            # "Date of Exportation": "",
            "Shipper/Exporter[Company]": "",
            "Shipper/Exporter[Address]": "",
            "Consignee[Company]": "",
            "Consignee[Address]": "",
            "goods": [],
            "packages": "",
            "weight": "",
            "volume": ""
        }


# 全局模型实例
extractor = MultimodalExtractor("/mnt/d/project/llt-model/Qwen3-VL-8B-Instruct")


# 内存管理
def cleanup_memory():
    """清理内存"""
    import gc
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


# # API路由
# @app.on_event("startup")
# async def startup_event():
#     """启动时加载模型"""
#     logger.info("启动服务，正在加载模型...")
#     global global_model
#     global_model = extractor.load_model()
#     if global_model is None:
#         logger.error("模型加载失败，服务无法启动")
#         raise RuntimeError("模型加载失败")


@app.get("/")
async def root():
    """根路径"""
    return {
        "message": "多模态提单信息提取API服务",
        "status": "运行中",
        "model_loaded": extractor.is_loaded
    }


@app.get("/health")
async def health_check():
    """健康检查"""
    return {
        "status": "healthy" if extractor.is_loaded else "unhealthy",
        "model_loaded": extractor.is_loaded,
        "gpu_available": torch.cuda.is_available()
    }


@app.post("/extract", response_model=ExtractionResponse)
async def extract_from_file(
        file: UploadFile = File(...),
        request: ExtractionRequest = ExtractionRequest()
):
    """从上传的文件中提取信息"""
    request_id = str(uuid.uuid4())
    start_time = asyncio.get_event_loop().time()

    try:
        # 验证文件类型
        if request.file_type == "pdf":
            if not file.filename.lower().endswith('.pdf'):
                raise HTTPException(status_code=400, detail="文件必须是PDF格式")
        else:
            allowed_image_types = ['.jpg', '.jpeg', '.png', '.bmp']
            if not any(file.filename.lower().endswith(ext) for ext in allowed_image_types):
                raise HTTPException(status_code=400, detail="文件必须是图片格式")

        # 保存临时文件
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as tmp_file:
            content = await file.read()
            tmp_file.write(content)
            tmp_path = tmp_file.name

        try:
            # 提取信息
            if request.file_type == "pdf":
                result = extractor.extract_from_pdf(tmp_path)
                pages_processed = len(convert_from_path(tmp_path))
            else:
                result = extractor.extract_from_image(tmp_path)
                pages_processed = 1

            processing_time = asyncio.get_event_loop().time() - start_time

            print(f"result: {result}")
            # 构建响应
            response = ExtractionResponse(
                request_id=request_id,
                status="success",
                result=result,
                processing_time=round(processing_time, 2),
                pages_processed=pages_processed
            )

            return response

        finally:
            # 清理临时文件
            os.unlink(tmp_path)
            cleanup_memory()

    except Exception as e:
        logger.error(f"提取失败: {e}")
        processing_time = asyncio.get_event_loop().time() - start_time

        response = ExtractionResponse(
            request_id=request_id,
            status="error",
            error=str(e),
            processing_time=round(processing_time, 2)
        )
        return response


@app.post("/extract/batch", response_model=BatchExtractionResponse)
async def batch_extract(request: BatchExtractionRequest):
    """批量提取信息"""
    request_id = str(uuid.uuid4())

    # 这里实现批量处理逻辑
    # 由于时间关系，先返回占位响应
    return BatchExtractionResponse(
        request_id=request_id,
        status="processing",
        results=[],
        total_files=len(request.file_urls),
        successful_files=0,
        failed_files=0
    )


@app.post("/extract/url")
async def extract_from_url(file_url: str, file_type: str = "pdf"):
    """从URL提取信息"""
    request_id = str(uuid.uuid4())
    start_time = asyncio.get_event_loop().time()

    try:
        import requests
        import tempfile

        # 下载文件
        response = requests.get(file_url, stream=True)
        response.raise_for_status()

        # 保存临时文件
        file_extension = ".pdf" if file_type == "pdf" else ".jpg"
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as tmp_file:
            for chunk in response.iter_content(chunk_size=8192):
                tmp_file.write(chunk)
            tmp_path = tmp_file.name

        try:
            # 提取信息
            if file_type == "pdf":
                result = extractor.extract_from_pdf(tmp_path)
                pages_processed = len(convert_from_path(tmp_path))
            else:
                result = extractor.extract_from_image(tmp_path)
                pages_processed = 1

            processing_time = asyncio.get_event_loop().time() - start_time

            return ExtractionResponse(
                request_id=request_id,
                status="success",
                result=result,
                processing_time=round(processing_time, 2),
                pages_processed=pages_processed
            )

        finally:
            os.unlink(tmp_path)
            cleanup_memory()

    except Exception as e:
        logger.error(f"URL提取失败: {e}")
        processing_time = asyncio.get_event_loop().time() - start_time

        return ExtractionResponse(
            request_id=request_id,
            status="error",
            error=str(e),
            processing_time=round(processing_time, 2)
        )


@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    try:
        data = await request.json()
        messages = data.get("messages", [])

        if not extractor.is_loaded or extractor.processor is None:
            return JSONResponse({"error": "Model not loaded"}, status_code=503)

        # 构建 Qwen3-VL 的对话格式
        qwen_messages = []
        for msg in messages:
            role = msg.get("role")
            content = msg.get("content", "")

            if role == "user":
                qwen_messages.append({
                    "role": "user",
                    "content": [
                        {"type": "text", "text": content}
                    ]
                })
            elif role == "assistant":
                qwen_messages.append({
                    "role": "assistant",
                    "content": [
                        {"type": "text", "text": content}
                    ]
                })
            elif role == "system":
                # 系统消息可以作为第一个用户消息
                qwen_messages.append({
                    "role": "user",
                    "content": [
                        {"type": "text", "text": content}
                    ]
                })

        if not qwen_messages:
            return JSONResponse({"error": "No valid messages found"}, status_code=400)

        # 应用聊天模板
        text = extractor.processor.apply_chat_template(
            qwen_messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # 准备模型输入（仅文本）
        inputs = extractor.processor(
            text=[text],
            padding=True,
            return_tensors="pt"
        )

        # 将输入数据移动到模型所在的设备
        device = next(extractor.model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # 生成响应
        with torch.no_grad():
            outputs = extractor.model.generate(
                **inputs,
                max_new_tokens=1024,
                do_sample=False,
                temperature=0.1,
                top_p=0.9,
                pad_token_id=extractor.processor.tokenizer.eos_token_id
            )

        # 解码响应
        response_text = extractor.processor.decode(outputs[0], skip_special_tokens=True)

        print(f"response: {response_text}")

        # 提取助手回复（去掉输入部分）
        if "assistant" in response_text:
            response_parts = response_text.split("assistant")
            if len(response_parts) > 1:
                response_text = response_parts[-1].strip()

        # 清理内存
        del inputs, outputs
        torch_gc()

        # 构建 OpenAI 兼容的响应
        return JSONResponse({
            "choices": [{
                "message": {
                    "role": "assistant",
                    "content": response_text,
                }
            }]
        })

    except Exception as e:
        logger.error(f"Chat completion error: {e}")
        return JSONResponse({"error": str(e)}, status_code=500)

# 错误处理
@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    return JSONResponse(
        status_code=exc.status_code,
        content={
            "request_id": str(uuid.uuid4()),
            "status": "error",
            "error": exc.detail
        }
    )


@app.exception_handler(Exception)
async def general_exception_handler(request, exc):
    logger.error(f"未处理的异常: {exc}")
    return JSONResponse(
        status_code=500,
        content={
            "request_id": str(uuid.uuid4()),
            "status": "error",
            "error": "内部服务器错误"
        }
    )



if __name__ == '__main__':
    config = uvicorn.Config(app, host="0.0.0.0", port=8000, workers=5)
    server = uvicorn.Server(config)

    try:
        asyncio.run(server.serve())
    except RuntimeError:
        import nest_asyncio

        nest_asyncio.apply()
        asyncio.run(server.serve())