#!/usr/bin/env python3
"""
vLLM高性能推理脚本 - 专用于微调模型推理

支持高性能批量推理和单张图像推理
优化内存使用和推理速度
支持LoRA适配器加载
"""

import os
import sys
import json
import yaml
import argparse
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging
from datetime import datetime
import time

# Logging setup: module-level logger for the whole script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Optional dependency: the template manager supplies field definitions.
# When it cannot be imported, fall back to the built-in field lists.
try:
    from template_manager import get_template_manager
    TEMPLATE_MANAGER_AVAILABLE = True
except ImportError:
    logger.warning("模板管理器不可用，使用内置字段定义")
    TEMPLATE_MANAGER_AVAILABLE = False


class VLLMDocumentInferencer:
    """vLLM-based high-performance document information extractor.

    Loads a fine-tuned (optionally LoRA-adapted) multimodal model through
    vLLM and extracts structured key/value fields from document images,
    driven by a per-document-type YAML template.
    """

    def __init__(self, model_path: str, config_path: str = "config/config.yaml", document_type: str = "contract", gpu_id: Optional[int] = None, adapter_path: Optional[str] = None):
        """Initialize the inferencer.

        Args:
            model_path: Path to the fine-tuned model (a directory that may
                contain versioned sub-directories and/or checkpoints).
            config_path: Path to the YAML configuration file.
            document_type: Document template key (e.g. "contract").
            gpu_id: If given, pin inference to this GPU via CUDA_VISIBLE_DEVICES.
            adapter_path: Optional explicit LoRA adapter path. When omitted,
                one may be auto-detected while resolving ``model_path``.
        """
        # Pin the GPU before any CUDA context is created.
        if gpu_id is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
            logger.info(f"🔧 已设置CUDA_VISIBLE_DEVICES={gpu_id}")

        # BUGFIX: adapter_path must be assigned BEFORE _resolve_model_path(),
        # which may auto-detect a LoRA adapter and record it on self.
        # The previous ordering overwrote the detected adapter with None,
        # so LoRA was silently never enabled in _setup_vllm().
        self.adapter_path = adapter_path
        self.model_path = self._resolve_model_path(model_path)
        self.config = self._load_config(config_path)
        self.document_type = document_type

        # Load the document template and derive the list of fields to extract.
        self.template = self._load_document_template(document_type)
        self.document_fields = self._get_document_fields_from_template()

        logger.info(f"📄 文档类型: {self.template['display_name']}")
        logger.info(f"🏷️  提取字段: {len(self.document_fields)} 个")

        # Bring up the vLLM engine last (heaviest, most failure-prone step).
        self._setup_vllm()

    def _resolve_model_path(self, model_path: str) -> str:
        """Resolve a model path, preferring the newest version/checkpoint.

        Looks for ``v*`` version directories, then for a full model
        (``config.json``) or a ``checkpoint-*`` directory. If a checkpoint
        turns out to be a LoRA adapter, records the adapter on
        ``self.adapter_path`` (unless one was supplied explicitly) and
        returns the base model path instead.

        Returns:
            The resolved model directory, or the original path as fallback.
        """
        model_dir = Path(model_path)
        logger.info(f"🔍 解析模型路径: {model_path}")

        if not model_dir.exists():
            logger.error(f"❌ 模型路径不存在: {model_path}")
            return model_path

        # Latest version directory wins (lexicographic sort of v* names).
        version_dirs = sorted([d for d in model_dir.iterdir() if d.is_dir() and d.name.startswith('v')])
        if version_dirs:
            latest_version = version_dirs[-1]
            logger.info(f"📁 找到最新版本: {latest_version.name}")

            # A config.json directly in the version dir means a full model.
            config_file = latest_version / "config.json"
            if config_file.exists():
                logger.info(f"✅ 使用完整模型: {latest_version}")
                return str(latest_version)

            # Otherwise look for the latest checkpoint directory.
            checkpoints = sorted(latest_version.glob('checkpoint-*'))
            if checkpoints:
                checkpoint_dir = checkpoints[-1]
                config_file = checkpoint_dir / "config.json"
                if config_file.exists():
                    logger.info(f"✅ 使用checkpoint: {checkpoint_dir}")
                    return str(checkpoint_dir)

                # No config.json: this may be a LoRA adapter checkpoint.
                adapter_config = checkpoint_dir / "adapter_config.json"
                if adapter_config.exists():
                    logger.info(f"🔧 检测到LoRA适配器模型: {checkpoint_dir}")
                    # The adapter config names the base model it was trained on.
                    with open(adapter_config, 'r', encoding='utf-8') as f:
                        adapter_cfg = json.load(f)

                    base_model_path = adapter_cfg.get('base_model_name_or_path')
                    if base_model_path and Path(base_model_path).exists():
                        logger.info(f"📂 使用基础模型路径: {base_model_path}")
                        # Record the detected adapter, but never override an
                        # adapter path the caller passed in explicitly.
                        if self.adapter_path is None:
                            self.adapter_path = str(checkpoint_dir)
                        return base_model_path

                    # Heuristic fallback: look for a base model inside the
                    # project tree — TODO confirm this layout still exists.
                    project_base_model = Path("models/universal_document/stage1") / latest_version.name / "checkpoint-3"
                    if project_base_model.exists() and (project_base_model / "config.json").exists():
                        logger.info(f"📂 使用项目基础模型: {project_base_model}")
                        if self.adapter_path is None:
                            self.adapter_path = str(checkpoint_dir)
                        return str(project_base_model)

                    logger.warning(f"⚠️  无法找到基础模型，可能无法用于vLLM推理")
                    return str(checkpoint_dir)

        logger.warning(f"⚠️  使用原路径: {model_path}")
        return model_path

    def _load_config(self, config_path: str) -> dict:
        """Load and return the YAML configuration file as a dict."""
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def _load_document_template(self, document_type: str) -> dict:
        """Load the YAML template for *document_type*.

        Falls back to the default contract template when the file is
        missing or cannot be parsed.
        """
        template_path = Path(f"config/templates/{document_type}.yaml")

        if not template_path.exists():
            logger.warning(f"⚠️  未找到模板文件: {template_path}，使用默认合同模板")
            return self._get_default_template()

        try:
            with open(template_path, 'r', encoding='utf-8') as f:
                template = yaml.safe_load(f)
                logger.info(f"✅ 加载模板: {template['display_name']}")
                return template
        except Exception as e:
            logger.error(f"❌ 加载模板失败: {e}，使用默认合同模板")
            return self._get_default_template()

    def _get_document_fields_from_template(self) -> List[str]:
        """Return the list of field names to extract.

        Resolution order: template manager (if importable) -> the loaded
        template's ``fields`` section -> a hard-coded contract field list.
        """
        if TEMPLATE_MANAGER_AVAILABLE:
            try:
                manager = get_template_manager()
                template = manager.get_template(self.document_type)
                if template:
                    fields = template.get_field_names()
                    if fields:
                        logger.info(f"✅ 从模板管理器加载字段: {len(fields)} 个")
                        return fields
            except Exception as e:
                logger.warning(f"⚠️  模板管理器加载字段失败: {e}")

        # Fall back to reading fields straight out of the template config.
        if 'fields' in self.template:
            if isinstance(self.template['fields'], list):
                # Accept both field formats: [{name: "..."}] and [{field_name: "..."}].
                fields = []
                for field in self.template['fields']:
                    if isinstance(field, dict):
                        field_name = field.get('name') or field.get('field_name')
                        if field_name:
                            fields.append(field_name)
                if fields:
                    logger.info(f"✅ 从模板配置加载字段: {len(fields)} 个")
                    return fields

        # Last-resort fallback.
        logger.warning("⚠️  使用硬编码的默认字段")
        return [
            "合同名称", "合同编号", "甲方名称", "甲方地址", "甲方联系人",
            "乙方名称", "乙方地址", "乙方联系人", "合同金额", "履行期限",
            "签署日期", "生效日期", "履行地点", "付款方式", "违约责任"
        ]

    def _get_default_template(self) -> dict:
        """Return the default contract template.

        Prefers ``config/templates/contract.yaml``; falls back to a minimal
        hard-coded template when the file is missing or unreadable.
        """
        default_template_path = Path("config/templates/contract.yaml")
        if default_template_path.exists():
            try:
                with open(default_template_path, 'r', encoding='utf-8') as f:
                    template = yaml.safe_load(f)
                    logger.info(f"✅ 加载默认模板: {template['display_name']}")
                    return template
            except Exception as e:
                logger.error(f"❌ 加载默认模板失败: {e}")

        # Minimal built-in template when no config file can be loaded.
        logger.warning("⚠️  使用硬编码的默认模板")
        return {
            'document_type': 'contract',
            'display_name': '合同文档',
            'description': '合同类文档的关键信息抽取',
            'fields': [
                {'field_name': '合同名称', 'field_type': 'text', 'required': True},
                {'field_name': '合同编号', 'field_type': 'text', 'required': True},
                {'field_name': '甲方名称', 'field_type': 'text', 'required': True},
                {'field_name': '乙方名称', 'field_type': 'text', 'required': True},
                {'field_name': '合同金额', 'field_type': 'amount', 'required': True},
                {'field_name': '签署日期', 'field_type': 'date', 'required': True}
            ]
        }

    def _setup_vllm(self):
        """Initialize the vLLM engine and sampling parameters.

        Raises:
            ImportError: If vLLM is not installed.
            RuntimeError: If engine initialization fails for any other reason.
        """
        try:
            from vllm import LLM, SamplingParams

            logger.info("🚀 初始化vLLM推理引擎...")

            # vLLM requires a full HF-style model directory with config.json.
            config_file = Path(self.model_path) / "config.json"
            if not config_file.exists():
                raise FileNotFoundError(f"vLLM需要config.json文件，未在{self.model_path}中找到")

            vllm_kwargs = {
                "model": self.model_path,
                "trust_remote_code": True,
                "gpu_memory_utilization": self.config['deployment']['vllm_gpu_memory_utilization'],
                "max_model_len": self.config['deployment']['vllm_max_model_len'],
                "enforce_eager": True,  # skip CUDA graph capture to avoid compilation issues
                "disable_custom_all_reduce": True,  # improves stability
                "dtype": "float16",  # halve weight memory vs float32
                "tensor_parallel_size": 1,  # single GPU
                # Allow file:// image URLs under this local directory.
                "allowed_local_media_path": "/home/ai/test_qwen/contract_extraction"
            }

            # Enable LoRA support when an adapter path is available
            # (explicit argument or auto-detected in _resolve_model_path).
            if self.adapter_path:
                logger.info(f"🔧 启用LoRA适配器支持: {self.adapter_path}")

                # Prefer the base model recorded in the adapter's own config.
                adapter_config_path = os.path.join(self.adapter_path, "adapter_config.json")
                if os.path.exists(adapter_config_path):
                    with open(adapter_config_path, 'r', encoding='utf-8') as f:
                        adapter_config = json.load(f)

                    base_model_path = adapter_config.get('base_model_name_or_path')
                    if base_model_path and os.path.exists(base_model_path):
                        logger.info(f"📂 使用基础模型路径: {base_model_path}")
                        vllm_kwargs["model"] = base_model_path

                vllm_kwargs.update({
                    "enable_lora": True,
                    "max_loras": 1,
                    "max_lora_rank": 64,
                    "lora_extra_vocab_size": 256
                })

            self.vllm_model = LLM(**vllm_kwargs)

            # Near-greedy sampling for deterministic, accurate extraction.
            self.sampling_params = SamplingParams(
                temperature=0.05,  # low temperature for more deterministic output
                top_p=self.config['inference']['top_p'],
                max_tokens=self.config['inference']['max_new_tokens'],
                repetition_penalty=1.05,
                stop_token_ids=None
            )

            logger.info("✅ vLLM推理引擎初始化成功")

        except ImportError:
            raise ImportError("❌ vLLM未安装，请安装: pip install vllm")
        except Exception as e:
            raise RuntimeError(f"❌ vLLM初始化失败: {e}")

    def _create_inference_prompt(self) -> str:
        """Build the extraction prompt from the loaded template.

        Uses the template's own ``prompt_template`` when present; otherwise
        assembles a default instruction listing the fields and the required
        JSON output format.
        """
        fields = self.template['fields']
        if 'prompt_template' in self.template:
            # Fill the template's placeholders with the field list and the
            # expected JSON skeleton.
            field_list = "\n".join(f"- {field['field_name']}" for field in fields)
            json_lines = []
            for i, field in enumerate(fields):
                comma = "" if i == len(fields) - 1 else ","
                json_lines.append(f'  "{field["field_name"]}": "具体内容或未找到"{comma}')
            json_format = "{\n" + "\n".join(json_lines) + "\n}"

            return self.template['prompt_template'].format(
                field_list=field_list,
                json_format=json_format
            )
        else:
            # Default prompt: role, extraction rules, field list, JSON schema.
            prompt = f"""您是一位专业的{self.template['display_name']}审查专家，请仔细分析这份{self.template['display_name']}图像，准确提取以下关键字段信息。

提取要求：
1. 必须从图像中的{self.template['display_name']}准确提取信息
2. 如果某个字段在文档中找不到，请标记为"未找到"
3. 金额需要包含货币符号（如￥、$等）
4. 日期需要使用标准格式（如2024年1月15日或2024-01-15）
5. 公司名称需要完整准确
6. 如果某个字段在文档中找不到，请标记为"未找到"

需要提取的字段：
"""
            for field in fields:
                prompt += f"- {field['field_name']}\n"

            prompt += """
请严格按照以下JSON格式输出结果，不要添加任何其他内容：
{
"""
            for i, field in enumerate(fields):
                comma = "," if i < len(fields) - 1 else ""
                prompt += f'  "{field["field_name"]}": "具体内容或未找到"{comma}\n'

            prompt += "}"

            return prompt

    def _parse_response(self, response_text: str, context: str = "") -> Dict[str, Any]:
        """Parse a model response into a field dict.

        Tries to locate and decode the outermost JSON object in the text;
        falls back to line-based structured extraction when no valid JSON
        is found.

        Args:
            response_text: Raw generated text.
            context: Optional suffix (e.g. an image path) appended to
                warning messages — used by batch inference.

        Returns:
            Mapping of field name -> extracted value.
        """
        suffix = f": {context}" if context else ""
        try:
            # Locate the outermost {...} span in the response.
            json_start = response_text.find('{')
            json_end = response_text.rfind('}') + 1
            if json_start >= 0 and json_end > json_start:
                extracted = json.loads(response_text[json_start:json_end])
                logger.info("✅ 成功解析JSON结果")
                return extracted
            logger.warning(f"⚠️  未找到有效的JSON格式，尝试结构化提取{suffix}")
        except json.JSONDecodeError:
            logger.warning(f"⚠️  JSON解析失败，尝试结构化提取{suffix}")
        return self._extract_structured_info(response_text)

    def infer_single_image(self, image_path: str) -> Dict[str, Any]:
        """Run inference on a single image.

        Args:
            image_path: Path to the image file.

        Returns:
            Result dict with extracted fields, timing and metadata.

        Raises:
            FileNotFoundError: If the image file does not exist.
        """
        logger.info(f"🔄 开始推理图像: {image_path}")

        if not Path(image_path).exists():
            raise FileNotFoundError(f"图像文件不存在: {image_path}")

        start_time = time.time()

        try:
            prompt = self._create_inference_prompt()

            # vLLM multimodal chat format: image first, then the instruction.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": f"file://{os.path.abspath(image_path)}"}},
                        {"type": "text", "text": prompt}
                    ]
                }
            ]

            logger.info("🚀 执行vLLM推理...")

            outputs = self.vllm_model.chat(
                messages=messages,
                sampling_params=self.sampling_params,
                use_tqdm=False
            )

            if not outputs or len(outputs) == 0:
                raise RuntimeError("vLLM推理未返回结果")

            response_text = outputs[0].outputs[0].text.strip()
            extracted_info = self._parse_response(response_text)
            success = True

        except Exception as e:
            logger.error(f"❌ 推理失败: {e}")
            extracted_info = {field['field_name']: "推理失败" for field in self.template['fields']}
            success = False

        inference_time = time.time() - start_time

        result = {
            "image_path": str(image_path),
            "document_type": self.document_type,
            "timestamp": datetime.now().isoformat(),
            "inference_time_seconds": inference_time,
            "extracted_fields": extracted_info,
            "model_path": self.model_path,
            "adapter_path": self.adapter_path,
            "success": success,
            "inference_engine": "vllm"
        }

        logger.info(f"✅ 推理完成，用时: {inference_time:.2f}秒")
        return result

    def infer_batch(self, image_paths: List[str]) -> List[Dict[str, Any]]:
        """Batch inference, leveraging vLLM's batching.

        Results are returned in the same order as *image_paths*; paths whose
        file is missing yield a failed entry instead of being dropped.

        Args:
            image_paths: List of image file paths.

        Returns:
            One result dict per input path.
        """
        logger.info(f"🚀 开始批量推理: {len(image_paths)} 张图像")

        if not image_paths:
            return []

        start_time = time.time()

        try:
            prompt = self._create_inference_prompt()
            batch_messages = []
            # BUGFIX: remember which inputs exist so outputs can be re-aligned
            # with the original path list. Previously zip(image_paths, outputs)
            # silently attached results to the wrong images whenever a missing
            # file had been skipped while building batch_messages.
            exists_flags = []

            for image_path in image_paths:
                exists = Path(image_path).exists()
                exists_flags.append(exists)
                if not exists:
                    logger.warning(f"⚠️  图像不存在: {image_path}")
                    continue

                batch_messages.append([
                    {
                        "role": "user",
                        "content": [
                            {"type": "image_url", "image_url": {"url": f"file://{os.path.abspath(image_path)}"}},
                            {"type": "text", "text": prompt}
                        ]
                    }
                ])

            logger.info(f"🔄 执行批量推理: {len(batch_messages)} 个有效输入")

            # Skip the engine call entirely when no input exists.
            if batch_messages:
                outputs = self.vllm_model.chat(
                    messages=batch_messages,
                    sampling_params=self.sampling_params,
                    use_tqdm=True
                )
            else:
                outputs = []

            results = []
            output_iter = iter(outputs)
            for image_path, exists in zip(image_paths, exists_flags):
                # Consume one engine output only for paths that were submitted.
                output = next(output_iter, None) if exists else None
                try:
                    if output is not None and len(output.outputs) > 0:
                        response_text = output.outputs[0].text.strip()
                        extracted_info = self._parse_response(response_text, image_path)
                        success = True
                    else:
                        extracted_info = {field['field_name']: "推理失败" for field in self.template['fields']}
                        success = False

                except Exception as e:
                    logger.error(f"❌ 处理结果失败 {image_path}: {e}")
                    extracted_info = {field['field_name']: "处理失败" for field in self.template['fields']}
                    success = False

                results.append({
                    "image_path": str(image_path),
                    "document_type": self.document_type,
                    "timestamp": datetime.now().isoformat(),
                    "extracted_fields": extracted_info,
                    "model_path": self.model_path,
                    "adapter_path": self.adapter_path,
                    "success": success,
                    "inference_engine": "vllm"
                })

        except Exception as e:
            logger.error(f"❌ 批量推理失败: {e}")
            # Engine-level failure: return a failed entry for every input.
            results = []
            for image_path in image_paths:
                results.append({
                    "image_path": str(image_path),
                    "document_type": self.document_type,
                    "timestamp": datetime.now().isoformat(),
                    "extracted_fields": {field['field_name']: "批量推理失败" for field in self.template['fields']},
                    "model_path": self.model_path,
                    "adapter_path": self.adapter_path,
                    "success": False,
                    "inference_engine": "vllm",
                    "error": str(e)
                })

        total_time = time.time() - start_time

        success_count = sum(1 for r in results if r.get('success', False))
        logger.info(f"✅ 批量推理完成: 成功 {success_count}/{len(results)} 个，用时: {total_time:.2f}秒")

        return results

    def _extract_structured_info(self, text: str) -> Dict[str, str]:
        """Fallback parser: scrape "field: value" pairs from free-form text.

        Returns "未找到" for every field that cannot be located.
        """
        import re  # local import: only needed on the fallback path

        result: Dict[str, str] = {}
        for field in self.document_fields:
            value = "未找到"
            if field in text:
                # Match "<field>：<value>" up to the next delimiter/newline.
                # re.escape guards against regex metacharacters in field names.
                match = re.search(rf"{re.escape(field)}[：:]\s*([^\n，,。.]+)", text)
                if match:
                    value = match.group(1).strip()
            result[field] = value
        return result

    def print_result(self, result: Dict[str, Any]):
        """Pretty-print one inference result to stdout."""
        print("\n" + "="*60)
        print(f"📄 图像: {Path(result['image_path']).name}")
        print(f"⏱️  推理时间: {result.get('inference_time_seconds', 0):.2f}秒")
        print(f"🚀 推理引擎: {result.get('inference_engine', 'unknown')}")
        print(f"📅 时间戳: {result['timestamp']}")
        if result.get('adapter_path'):
            print(f"🔧 适配器路径: {result['adapter_path']}")
        print("="*60)

        print("\n📋 提取的字段:")
        for field, value in result['extracted_fields'].items():
            status = "✓" if value != "未找到" else "✗"
            print(f"  {status} {field}: {value}")

        print("\n" + "="*60)


def main():
    """CLI entry point: parse arguments, run single or batch inference, save results."""
    parser = argparse.ArgumentParser(description="vLLM高性能文档信息抽取推理")
    parser.add_argument('--model', required=True, help='微调后的模型路径')
    parser.add_argument('--image', help='单张图像路径')
    parser.add_argument('--image_dir', help='图像目录（批量推理）')
    parser.add_argument('--output', help='输出文件路径')
    parser.add_argument('--config', default='config/config.yaml', help='配置文件路径')
    parser.add_argument('--document_type', default='contract',
                       choices=['contract', 'invoice', 'resume', 'medical_report', 'id_card'],
                       help='文档类型')
    parser.add_argument('--gpu_id', type=int, default=None, help='指定GPU ID')
    parser.add_argument('--adapter', help='LoRA适配器路径')

    args = parser.parse_args()

    try:
        inferencer = VLLMDocumentInferencer(
            model_path=args.model,
            config_path=args.config,
            document_type=args.document_type,
            gpu_id=args.gpu_id,
            adapter_path=args.adapter
        )

        results = []

        if args.image:
            # Single-image inference.
            result = inferencer.infer_single_image(args.image)
            inferencer.print_result(result)
            results = [result]

        elif args.image_dir:
            # Batch inference over a directory.
            image_dir = Path(args.image_dir)
            if not image_dir.exists():
                logger.error(f"❌ 图像目录不存在: {args.image_dir}")
                sys.exit(1)

            image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff'}

            # Case-insensitive suffix match via iterdir(). This fixes two
            # defects of the old lower/upper glob pair: duplicate entries on
            # case-insensitive filesystems, and missed mixed-case suffixes
            # (e.g. ".Jpg"). The set dedupes; sorting makes the processing
            # order deterministic.
            image_files = sorted({
                f for f in image_dir.iterdir()
                if f.is_file() and f.suffix.lower() in image_extensions
            })

            if not image_files:
                logger.error(f"❌ 在目录中未找到图像文件: {args.image_dir}")
                sys.exit(1)

            logger.info(f"📄 找到 {len(image_files)} 个图像文件")

            image_paths = [str(f) for f in image_files]
            results = inferencer.infer_batch(image_paths)

            # Print a summary of the batch run.
            success_count = sum(1 for r in results if r.get('success', False))
            print(f"\n📊 批量推理完成: 成功 {success_count}/{len(results)} 个文件")

        else:
            logger.error("❌ 请指定 --image 或 --image_dir 参数")
            sys.exit(1)

        # Persist results as JSON when an output path was given.
        if args.output and results:
            output_path = Path(args.output)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            with open(args.output, 'w', encoding='utf-8') as f:
                json.dump(results, f, ensure_ascii=False, indent=2)
            logger.info(f"💾 结果已保存到: {args.output}")

    except KeyboardInterrupt:
        logger.info("⏹️  用户中断推理")
        sys.exit(1)
    except Exception as e:
        logger.error(f"❌ 推理失败: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()