"""
PanGu-Alpha LangChain LLM 包装器
"""
from typing import Optional, List, Dict, Any

from langchain_core.language_models.llms import LLM
from langchain_core.callbacks.manager import CallbackManagerForLLMRun

# os and sys are standard library and must always be available; only the
# optional heavy dependencies (transformers / torch) belong inside the guard.
import os
import sys

try:
    from transformers import AutoTokenizer, AutoModelForCausalLM
    import torch
except ImportError:
    # Degrade gracefully: leave sentinels so _load_model can raise a
    # helpful ImportError later instead of failing at import time.
    print("警告: 某些依赖未安装，请运行 pip install -r requirements.txt")
    AutoTokenizer = None
    AutoModelForCausalLM = None


class PanguLLM(LLM):
    """LangChain wrapper around a locally hosted PanGu-Alpha causal LM.

    The model implementation is loaded from a local ``my_pangu`` package
    (config / tokenizer / model classes), mirroring the reference ``chat.py``
    loading procedure.

    Attributes:
        model_path: Directory holding the model weights and ``vocab.model``.
        model_code_path: Directory holding the ``my_pangu`` package source.
        model: The loaded causal-LM instance (populated by ``_load_model``).
        tokenizer: The loaded PanGu tokenizer (populated by ``_load_model``).
        temperature: Sampling temperature; ``0`` switches to greedy decoding.
        max_tokens: Maximum number of new tokens to generate per call.
        top_p: Nucleus-sampling probability mass.
        top_k: Top-k cutoff; non-positive values fall back to 50.
    """

    model_path: str = "/home/mcd/pangu_LLM/model"
    model_code_path: str = "/home/mcd/pangu_LLM/my_pangu"
    model: Optional[Any] = None
    tokenizer: Optional[Any] = None
    temperature: float = 0.7
    max_tokens: int = 512
    top_p: float = 1.0
    top_k: int = 50

    def __init__(self, model_path: Optional[str] = None, model_code_path: Optional[str] = None, **kwargs):
        """Initialize the wrapper and eagerly load the model.

        Args:
            model_path: Model weight directory (default ``/home/mcd/pangu_LLM/model``).
            model_code_path: Model code directory (default ``/home/mcd/pangu_LLM/my_pangu``).

        Raises:
            ImportError: If transformers/torch or the my_pangu package is missing.
            FileNotFoundError: If the model code path or vocab file is absent.
            RuntimeError: If the model itself fails to load.
        """
        # BUG FIX: the LangChain LLM base class is a pydantic model, so
        # assigning instance attributes BEFORE super().__init__() has run
        # raises (pydantic has not set up the instance yet).  Feed the
        # overrides through kwargs so pydantic initializes them properly.
        if model_path:
            kwargs["model_path"] = model_path
        if model_code_path:
            kwargs["model_code_path"] = model_code_path
        super().__init__(**kwargs)
        self._load_model()

    def _load_model(self):
        """Load the PanGu model and tokenizer from the local directories.

        Raises:
            ImportError: If transformers or the my_pangu model classes are missing.
            FileNotFoundError: If ``model_code_path`` or ``vocab.model`` is absent.
            RuntimeError: If loading the weights fails (wraps the original error).
        """
        if AutoTokenizer is None:
            raise ImportError(
                "无法导入 transformers。请安装依赖: "
                "pip install transformers torch"
            )

        print(f"正在加载盘古模型: {self.model_path}")
        print(f"模型代码路径: {self.model_code_path}")

        # Fail fast if the model source directory is missing.
        if not os.path.exists(self.model_code_path):
            raise FileNotFoundError(
                f"模型代码路径不存在: {self.model_code_path}\n"
                f"请确保 my_pangu 目录存在并包含模型实现代码"
            )

        # Make the parent directory importable so `import my_pangu` resolves.
        parent_dir = os.path.dirname(self.model_code_path)
        if parent_dir not in sys.path:
            sys.path.insert(0, parent_dir)

        # Import the model classes from the my_pangu package (as chat.py does).
        try:
            from my_pangu.modeling_gptpangu import GPTPanguForCausalLM
            from my_pangu.configuration_gptpangu import GPTPanguConfig
            from my_pangu.tokenization_gptpangu import GPTPanguTokenizer
            print("模型类已成功导入")
        except ImportError as e:
            raise ImportError(
                f"无法导入模型类。请确保 {parent_dir} 目录下有 my_pangu 包，"
                f"且包含: configuration_gptpangu.py, tokenization_gptpangu.py, modeling_gptpangu.py"
            ) from e

        try:
            # Pick a device: GPU if available, otherwise CPU.
            device = "cuda" if torch.cuda.is_available() else "cpu"
            print(f"使用设备: {device}")

            # Warn on old torch versions (CVE-2025-32434 restricts torch.load
            # of legacy pickle checkpoints before PyTorch 2.6).
            torch_version = torch.__version__.split('.')
            major, minor = int(torch_version[0]), int(torch_version[1])
            if major < 2 or (major == 2 and minor < 6):
                print("警告: PyTorch 版本较低，可能需要升级到 2.6+ 才能加载旧格式模型文件")
                print(f"当前版本: {torch.__version__}")
                print("建议运行: pip install torch>=2.6.0")

            # Load the config first (as chat.py does).
            config = GPTPanguConfig.from_pretrained(self.model_path)

            # Build the tokenizer directly from the sentencepiece vocab file.
            vocab_file = os.path.join(self.model_path, "vocab.model")
            if not os.path.exists(vocab_file):
                raise FileNotFoundError(f"词汇表文件不存在: {vocab_file}")

            self.tokenizer = GPTPanguTokenizer(model_file=vocab_file)

            # Checkpoint is a pytorch_model.bin pickle, so safetensors is off.
            use_safetensors = False

            # Half precision on GPU saves memory; full precision on CPU.
            torch_dtype = torch.float16 if device == "cuda" else torch.float32

            # Load the weights, translating the torch.load safety error into
            # an actionable upgrade message.
            try:
                self.model = GPTPanguForCausalLM.from_pretrained(
                    self.model_path,
                    config=config,
                    trust_remote_code=True,
                    local_files_only=True,
                    torch_dtype=torch_dtype,
                    low_cpu_mem_usage=True,
                    use_safetensors=use_safetensors
                )
            except RuntimeError as e:
                if "torch.load" in str(e) or "CVE-2025-32434" in str(e):
                    raise RuntimeError(
                        f"模型加载失败：PyTorch 版本过低。\n"
                        f"当前版本: {torch.__version__}\n"
                        f"请升级 PyTorch 到 2.6 或更高版本：\n"
                        f"  pip install --upgrade torch>=2.6.0\n"
                        f"或者如果使用 CUDA：\n"
                        f"  pip install --upgrade torch>=2.6.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118"
                    ) from e
                else:
                    raise

            # Move to the target device and switch to inference mode.
            self.model = self.model.to(device)
            self.model.eval()

            print("盘古模型加载完成!")

        except Exception as e:
            raise RuntimeError(f"模型加载失败: {str(e)}") from e

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM implementation."""
        return "pangu_alpha"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Generate a completion for ``prompt`` (following chat.py).

        Args:
            prompt: The user prompt; a question mark is appended if missing.
            stop: Optional stop strings; output is truncated at the first hit.
            run_manager: LangChain callback manager (unused).

        Returns:
            The generated text with the prompt echo and special tokens removed.

        Raises:
            RuntimeError: If generation fails (wraps the original error).
        """
        try:
            device = next(self.model.parameters()).device

            # PanGu responds better to question-form prompts (per chat.py).
            if not prompt.endswith("？") and not prompt.endswith("?"):
                prompt = prompt + "？"

            # Tokenize and move tensors onto the model's device.
            inputs = self.tokenizer(prompt, return_tensors="pt")
            inputs = {k: v.to(device) for k, v in inputs.items()}

            # One shared kwargs dict — previously this keyword list was
            # duplicated verbatim in the CUDA and CPU branches.
            gen_kwargs = dict(
                attention_mask=inputs.get('attention_mask'),
                max_new_tokens=self.max_tokens,
                temperature=self.temperature if self.temperature > 0 else 1.0,
                top_p=self.top_p,
                top_k=self.top_k if self.top_k > 0 else 50,
                do_sample=self.temperature > 0,
                pad_token_id=9,  # token id 9 used as pad/eos in chat.py
                eos_token_id=9,
                repetition_penalty=1.1,
                use_cache=True,
            )

            with torch.no_grad():
                if device.type == "cuda":
                    # Mixed precision on GPU, matching the loading dtype.
                    with torch.cuda.amp.autocast():
                        outputs = self.model.generate(inputs['input_ids'], **gen_kwargs)
                else:
                    outputs = self.model.generate(inputs['input_ids'], **gen_kwargs)

            # Decode the full sequence (prompt + continuation).
            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Strip the echoed prompt so only the continuation remains.
            if prompt in response:
                response = response.replace(prompt, "").strip()

            # Remove residual special tokens and doubled punctuation.
            response = response.replace("<eot>", "").replace("<pad>", "").strip()
            response = response.replace("？？", "？").replace("。。", "。")

            # Honor LangChain stop sequences: cut at the first occurrence.
            if stop:
                for stop_word in stop:
                    if stop_word in response:
                        response = response.split(stop_word)[0]

            return response.strip()

        except Exception as e:
            raise RuntimeError(f"生成失败: {str(e)}") from e
