"""
模型加载器，负责加载和初始化大语言模型
支持Hugging Face模型和GGUF格式模型
"""
import os
import torch
import logging
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
from ctransformers import AutoModelForCausalLM as CTAutoModelForCausalLM
from llama_cpp import Llama
from ..config import MODEL_PATH, MODEL_DEVICE, MODEL_PRECISION

# Configure logging for this module.
# NOTE(review): calling basicConfig() at import time mutates the process-wide
# root logger configuration; consider moving this to the application entry
# point instead of a library module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ModelLoader:
    """Loader for large language models.

    Detects the model format from the configured ``MODEL_PATH`` and loads
    either a Hugging Face transformers checkpoint or a GGUF single-file
    model (llama-cpp-python first, with a ctransformers fallback).
    """

    def __init__(self):
        # model/tokenizer are populated lazily by load_model().
        self.model = None
        self.tokenizer = None
        self.device = MODEL_DEVICE
        self.model_path = MODEL_PATH
        self.precision = MODEL_PRECISION
        self.model_type = self._detect_model_type()

    def _detect_model_type(self):
        """Return "gguf" for ``*.gguf`` paths, otherwise "huggingface".

        The extension check is case-insensitive so e.g. ``MODEL.GGUF``
        is also recognized.
        """
        return "gguf" if self.model_path.lower().endswith(".gguf") else "huggingface"

    def load_model(self):
        """Load the model and tokenizer for the configured path.

        Falls back to CPU when the requested CUDA/ROCm device is not
        available, then dispatches on the detected model type.

        Returns:
            tuple: ``(model, tokenizer)``
        """
        logger.info(f"正在加载模型: {self.model_path}")
        logger.info(f"模型类型: {self.model_type}")
        logger.info(f"使用设备: {self.device}")
        logger.info(f"使用精度: {self.precision}")

        # torch.cuda.is_available() also reports ROCm availability on AMD
        # builds of PyTorch, so a single check covers both back ends.
        if self.device == "cuda" and not torch.cuda.is_available():
            logger.warning("警告: CUDA/ROCm不可用，将使用CPU")
            self.device = "cpu"

        # Dispatch on the detected model format.
        if self.model_type == "gguf":
            return self._load_gguf_model()
        return self._load_huggingface_model()

    def _load_huggingface_model(self):
        """Load a Hugging Face checkpoint and its tokenizer.

        Uses float16 weights when ``precision`` is "fp16" and the target
        device is not the CPU; otherwise lets transformers pick the
        default dtype.

        Returns:
            tuple: ``(model, tokenizer)``
        """
        config = AutoConfig.from_pretrained(self.model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)

        # Only request half precision when it is both asked for and the
        # device supports it (fp16 inference on CPU is generally not).
        kwargs = {"config": config, "device_map": self.device}
        if self.precision == "fp16" and self.device != "cpu":
            kwargs["torch_dtype"] = torch.float16
        self.model = AutoModelForCausalLM.from_pretrained(self.model_path, **kwargs)

        logger.info("Hugging Face模型加载完成")
        return self.model, self.tokenizer

    def _load_gguf_model(self):
        """Load a GGUF model, preferring llama-cpp-python.

        If llama-cpp-python fails for any reason, falls back to
        ctransformers in Hugging Face compatibility mode.

        Returns:
            tuple: ``(model, tokenizer)``
        """
        try:
            # -1 offloads every layer to the GPU; 0 keeps everything on CPU.
            gpu_layers = -1 if self.device == "cuda" else 0

            self.model = Llama(
                model_path=self.model_path,
                n_gpu_layers=gpu_layers,
                n_ctx=2048,  # context window size
                verbose=False
            )

            # Minimal adapter exposing a subset of the Hugging Face
            # tokenizer interface on top of the Llama tokenizer.
            class LlamaTokenizerWrapper:
                def __init__(self, llama_model):
                    self.llama_model = llama_model

                def __call__(self, text, return_tensors=None, **kwargs):
                    # Llama.tokenize() expects UTF-8 bytes, not str.
                    if isinstance(text, list):
                        return {"input_ids": [self.llama_model.tokenize(t.encode("utf-8")) for t in text]}
                    return {"input_ids": [self.llama_model.tokenize(text.encode("utf-8"))]}

                def decode(self, token_ids, skip_special_tokens=True):
                    if hasattr(token_ids, "tolist"):
                        token_ids = token_ids.tolist()
                    # Llama.detokenize() returns raw bytes; decode them so the
                    # result is a str, matching what HF tokenizers return.
                    return self.llama_model.detokenize(token_ids).decode("utf-8", errors="replace")

            self.tokenizer = LlamaTokenizerWrapper(self.model)
            logger.info("GGUF模型通过llama-cpp-python加载完成")

        except Exception as e:
            logger.warning(f"使用llama-cpp-python加载失败: {str(e)}，尝试使用ctransformers加载")

            # ctransformers fallback; "auto" delegates the GPU-offload
            # decision to the library.
            gpu_layers = "auto" if self.device == "cuda" else 0
            self.model = CTAutoModelForCausalLM.from_pretrained(
                self.model_path,
                model_type="llama",  # adjust if the model is not LLaMA-architecture
                gpu_layers=gpu_layers,
                hf=True  # enable Hugging Face compatibility mode
            )

            # NOTE(review): assumes the ctransformers model exposes a
            # `.tokenizer` attribute in hf mode — confirm against the
            # installed ctransformers version.
            self.tokenizer = self.model.tokenizer
            logger.info("GGUF模型通过ctransformers加载完成")

        return self.model, self.tokenizer

    def get_model_and_tokenizer(self):
        """Return ``(model, tokenizer)``, loading them on first use."""
        if self.model is None or self.tokenizer is None:
            return self.load_model()
        return self.model, self.tokenizer
