from typing import List, Dict, Any, Optional
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from langchain.llms.base import LLM
from app.config import settings
from app.utils.logger import setup_logger
import logging

logger = logging.getLogger(__name__)

class TinyLlamaModel:
    """Singleton wrapper around a TinyLlama text-generation pipeline.

    The tokenizer and pipeline are loaded once, on first instantiation,
    and shared by every subsequent caller.
    """

    _instance = None

    def __new__(cls):
        # Classic singleton: first call builds the instance, later calls
        # return the same object. `_initialized` guards __init__ re-entry.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if self._initialized:
            return

        self._pipe = None       # transformers text-generation pipeline
        self._tokenizer = None  # matching tokenizer (provides the chat template)
        self._initialized = True
        # Load the model eagerly at construction time.
        self.load_model()

    def load_model(self) -> None:
        """Load the tokenizer and the text-generation pipeline.

        Raises:
            RuntimeError: if either the tokenizer or the model fails to load.
        """
        try:
            model_path = settings.model_path
            logger.info(f"开始加载模型，使用路径: {model_path}")

            # Load the tokenizer first; it is also needed for chat templating.
            logger.info("加载分词器...")
            self._tokenizer = AutoTokenizer.from_pretrained(
                str(model_path),
                trust_remote_code=True
            )
            logger.info("分词器加载成功")

            # Pick the compute dtype: MPS (Apple GPU) does not support
            # bfloat16, so use float16 there and float32 on plain CPU.
            if settings.USE_MPS and torch.backends.mps.is_available():
                logger.info("使用 MPS 后端 (Mac GPU)")
                dtype = torch.float16
            else:
                logger.info("使用 CPU 后端")
                dtype = torch.float32

            logger.info("加载模型...")
            self._pipe = pipeline(
                "text-generation",
                model=str(model_path),
                torch_dtype=dtype,
                device_map="auto"  # let accelerate handle device placement
            )

            logger.info("模型加载完成")

        except Exception as e:
            logger.exception("模型加载过程中发生错误")
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(f"模型加载失败: {str(e)}") from e

    def generate(self, prompt: str) -> str:
        """Generate a reply for a single user prompt.

        Returns:
            Only the newly generated text. (The pipeline would otherwise
            echo the formatted prompt back at the start of the output.)
        """
        # Lazily (re)load if the pipeline is missing — keeps this method
        # consistent with generate_chat_response.
        if self._pipe is None:
            self.load_model()

        try:
            # Format the single-turn conversation with the model's chat template.
            messages = [
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            formatted_prompt = self._tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )

            outputs = self._pipe(
                formatted_prompt,
                max_new_tokens=settings.MAX_NEW_TOKENS,
                do_sample=True,
                temperature=settings.TEMPERATURE,
                top_k=settings.TOP_K,
                top_p=settings.TOP_P,
                # text-generation defaults to return_full_text=True, which
                # prepends the prompt to the output — we only want the reply.
                return_full_text=False
            )

            return outputs[0]['generated_text']

        except Exception as e:
            logger.error(f"生成过程中发生错误: {e}")
            raise

    def load_model_cpu(self) -> None:
        """Fallback loader: build the pipeline on CPU with float32 weights.

        Raises:
            RuntimeError: if the CPU load also fails.
        """
        try:
            logger.info("使用 CPU 加载模型...")
            self._pipe = pipeline(
                "text-generation",
                model=settings.MODEL_CACHE_DIR,
                tokenizer=self._tokenizer,
                torch_dtype=torch.float32,
                device_map="cpu",
                trust_remote_code=True,
                # NOTE: torch_dtype must not be repeated inside model_kwargs —
                # transformers raises on kwargs passed twice.
                model_kwargs={
                    "low_cpu_mem_usage": True
                }
            )
            logger.info("模型成功加载到 CPU")
        except Exception as e:
            raise RuntimeError(f"CPU 加载也失败: {str(e)}") from e

    def generate_chat_response(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate a reply for a multi-turn conversation.

        Args:
            messages: chat history as ``{"role": ..., "content": ...}`` dicts.
            system_prompt: optional system message prepended to the history.

        Returns:
            Only the newly generated text (prompt echo stripped).

        Raises:
            RuntimeError: if generation fails.
        """
        if self._pipe is None:
            self.load_model()

        if system_prompt:
            # Prepend the system message without mutating the caller's list.
            messages = [
                {"role": "system", "content": system_prompt}
            ] + messages

        try:
            prompt = self._tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )

            outputs = self._pipe(
                prompt,
                max_new_tokens=settings.MAX_NEW_TOKENS,
                do_sample=settings.DO_SAMPLE,
                temperature=settings.TEMPERATURE,
                top_k=settings.TOP_K,
                top_p=settings.TOP_P,
                repetition_penalty=settings.REPETITION_PENALTY,
                # Strip the prompt echo; only the model's reply is returned.
                return_full_text=False
            )

            return outputs[0]["generated_text"]

        except Exception as e:
            raise RuntimeError(f"生成失败: {str(e)}") from e

class TinyLlamaLLM(LLM):
    """LangChain-compatible LLM wrapper around TinyLlamaModel."""

    # Declared as a pydantic field: LangChain's LLM base is a pydantic
    # model, so assigning an undeclared attribute in __init__ raises
    # ValueError ('"TinyLlamaLLM" object has no field "model"').
    model: Any = None

    def __init__(self, model: TinyLlamaModel):
        super().__init__()
        self.model = model

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses to tag this LLM implementation.
        return "tiny-llama"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # Delegate to the underlying model as a single-turn chat.
        # NOTE(review): `stop` sequences are accepted but not enforced —
        # the wrapped pipeline call does not receive them.
        messages = [{"role": "user", "content": prompt}]
        return self.model.generate_chat_response(messages)

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        # Parameters that uniquely identify this LLM for caching/serialization.
        return {"model_name": settings.MODEL_NAME}