"""
LLM管理器

这个模块负责管理各种大语言模型的初始化、配置和调用。
支持多种LLM提供商，包括OpenAI、Anthropic等。
"""

import logging
from typing import Optional, Dict, Any, List, Iterator
from langchain_core.language_models.base import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_openai import ChatOpenAI, OpenAI
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, SystemMessage
from langchain_core.callbacks import CallbackManagerForLLMRun
from config.settings import settings

logger = logging.getLogger(__name__)

class LLMManager:
    """
    LLM manager.

    Provides a unified access point to the large language models available to
    the application: keeps a registry of model configurations (all served
    through an OpenAI-compatible endpoint), instantiates the active model,
    and exposes unified invoke / stream helpers on top of it.

    Annotations that mention langchain base classes are written as
    forward-reference strings so the class body never evaluates them eagerly.
    """

    def __init__(self):
        """Initialize the manager and register every configured model."""
        # Instance of the active model; None until set_model() succeeds.
        self.current_model: Optional[BaseLanguageModel] = None
        # Registry key of the active model. Tracked explicitly because every
        # registry entry shares the same class (ChatOpenAI), so the instance
        # alone cannot identify which configuration it was built from.
        self.current_model_name: Optional[str] = None
        # model name -> {"class": ..., "params": ..., "description": ...}
        self.available_models: Dict[str, Dict[str, Any]] = {}
        self._initialize_models()

    def _initialize_models(self) -> None:
        """Register the available models and activate the default one."""
        logger.info("正在初始化LLM模型...")

        # Chat models served by the SiliconFlow OpenAI-compatible endpoint,
        # mapped to their human-readable descriptions. All entries share the
        # same class and connection parameters, so registration is data-driven.
        siliconflow_models = {
            "Qwen/Qwen2.5-7B-Instruct": "通义千问2.5-7B - 阿里巴巴开源的中文大模型",
            "Qwen/Qwen2.5-14B-Instruct": "通义千问2.5-14B - 更强大的中文大模型",
            "Qwen/Qwen2.5-32B-Instruct": "通义千问2.5-32B - 高性能中文大模型",
            "Qwen/QwQ-32B": "通义千问QwQ-32B - 专注推理的大模型",
            "deepseek-ai/DeepSeek-V2.5": "DeepSeek-V2.5 - 深度求索的高性能模型",
            "meta-llama/Llama-3.1-8B-Instruct": "Llama-3.1-8B - Meta的开源大模型",
        }

        if settings.OPENAI_API_KEY:
            for model_name, description in siliconflow_models.items():
                self.available_models[model_name] = {
                    "class": ChatOpenAI,
                    "params": {
                        "model": model_name,
                        "api_key": settings.OPENAI_API_KEY,
                        "base_url": settings.OPENAI_API_BASE,
                        "temperature": settings.DEFAULT_TEMPERATURE,
                        "max_tokens": settings.DEFAULT_MAX_TOKENS,
                        "streaming": settings.ENABLE_STREAMING,
                    },
                    "description": description,
                }
            logger.info("已加载 %d 个硅基流动模型", len(self.available_models))
        else:
            logger.warning("未找到API密钥，跳过模型初始化")

        # Activate the configured default model, falling back to the first
        # registered model when the configured one is not available.
        if self.available_models:
            default_model_name = settings.DEFAULT_LLM_MODEL
            if default_model_name in self.available_models:
                self.set_model(default_model_name)
            else:
                first_model = next(iter(self.available_models))
                self.set_model(first_model)
                logger.info("默认模型 %s 不可用，使用 %s", default_model_name, first_model)

    def set_model(self, model_name: str, **kwargs) -> bool:
        """
        Set the model to use for subsequent calls.

        Args:
            model_name (str): Registry key of the model to activate.
            **kwargs: Extra constructor parameters that override the
                registered defaults for this activation only.

        Returns:
            bool: True if the model was instantiated and activated.
        """
        if model_name not in self.available_models:
            logger.error("模型 %s 不可用", model_name)
            return False

        try:
            config = self.available_models[model_name]
            # Merge overrides into a fresh dict. The previous implementation
            # used a shallow .copy() of the config and then update()d the
            # shared "params" dict, permanently mutating the registry entry.
            params = {**config["params"], **kwargs}
            self.current_model = config["class"](**params)
            self.current_model_name = model_name

            logger.info("已切换到模型: %s", model_name)
            return True

        except Exception as e:
            logger.error("设置模型 %s 失败: %s", model_name, str(e))
            return False

    def get_current_model(self) -> "Optional[BaseLanguageModel]":
        """
        Return the active model instance.

        Returns:
            Optional[BaseLanguageModel]: The active model, or None if no
            model has been set.
        """
        return self.current_model

    def list_available_models(self) -> Dict[str, str]:
        """
        List every registered model.

        Returns:
            Dict[str, str]: Mapping of model name to its description.
        """
        return {
            name: config["description"]
            for name, config in self.available_models.items()
        }

    def invoke(self, messages: "List[BaseMessage]", **kwargs) -> str:
        """
        Call the active model and return its full response.

        Args:
            messages (List[BaseMessage]): Conversation messages to send.
            **kwargs: Extra parameters forwarded to the model call.

        Returns:
            str: The model's response text.

        Raises:
            ValueError: If no model has been set.
            Exception: Re-raised from the underlying model call.
        """
        if not self.current_model:
            raise ValueError("没有设置当前模型")

        try:
            if isinstance(self.current_model, BaseChatModel):
                response = self.current_model.invoke(messages, **kwargs)
                return response.content
            # Completion-style models take a plain prompt string.
            prompt = self._messages_to_prompt(messages)
            return self.current_model.invoke(prompt, **kwargs)

        except Exception as e:
            logger.error("模型调用失败: %s", str(e))
            raise

    def stream(self, messages: "List[BaseMessage]", **kwargs) -> Iterator[str]:
        """
        Stream a response from the active model.

        Args:
            messages (List[BaseMessage]): Conversation messages to send.
            **kwargs: Extra parameters forwarded to the model call.

        Yields:
            str: Incremental response fragments.

        Raises:
            ValueError: If no model has been set.
            Exception: Re-raised from the underlying model call.
        """
        if not self.current_model:
            raise ValueError("没有设置当前模型")

        try:
            if isinstance(self.current_model, BaseChatModel):
                for chunk in self.current_model.stream(messages, **kwargs):
                    # Skip empty chunks (e.g. role-only deltas).
                    if hasattr(chunk, 'content') and chunk.content:
                        yield chunk.content
            else:
                prompt = self._messages_to_prompt(messages)
                yield from self.current_model.stream(prompt, **kwargs)

        except Exception as e:
            logger.error("流式调用失败: %s", str(e))
            raise

    def _messages_to_prompt(self, messages: "List[BaseMessage]") -> str:
        """
        Flatten a message list into a single prompt string.

        Each message becomes a "Role: content" line; unknown message types
        fall back to their class name as the role label.

        Args:
            messages (List[BaseMessage]): Messages to flatten.

        Returns:
            str: Newline-joined prompt string.
        """
        role_by_type = (
            (SystemMessage, "System"),
            (HumanMessage, "Human"),
            (AIMessage, "Assistant"),
        )

        lines = []
        for message in messages:
            for message_type, role in role_by_type:
                if isinstance(message, message_type):
                    lines.append(f"{role}: {message.content}")
                    break
            else:
                lines.append(f"{message.__class__.__name__}: {message.content}")

        return "\n".join(lines)

    def update_model_params(self, **kwargs) -> bool:
        """
        Re-create the active model with updated parameters.

        Args:
            **kwargs: Parameters to override on the active model.

        Returns:
            bool: True if the model was successfully re-created.
        """
        if not self.current_model:
            logger.error("没有设置当前模型")
            return False

        # Look the model up by its tracked name. The previous implementation
        # matched via isinstance(..., config["class"]), which always hit the
        # first registry entry because all models share the ChatOpenAI class.
        name = self.current_model_name
        if name and name in self.available_models:
            return self.set_model(name, **kwargs)

        logger.error("无法确定当前模型名称")
        return False

    def get_model_info(self) -> Dict[str, Any]:
        """
        Describe the active model.

        Returns:
            Dict[str, Any]: Name, description, class name and registered
            params of the active model, or {"error": ...} when no model is
            set or its configuration cannot be found.
        """
        if not self.current_model:
            return {"error": "没有设置当前模型"}

        # Resolve by tracked name rather than isinstance, which could not
        # distinguish between entries sharing the same model class.
        config = self.available_models.get(self.current_model_name)
        if config is not None:
            return {
                "name": self.current_model_name,
                "description": config["description"],
                "class": config["class"].__name__,
                # Shallow copy so callers cannot mutate the registry entry.
                "params": dict(config["params"]),
            }

        return {"error": "无法获取模型信息"}

# Module-level singleton: import `llm_manager` to share one model registry
# and active model across the application. Note this runs model
# initialization (and reads settings) at import time.
llm_manager = LLMManager() 