"""
LLM接口，负责与大语言模型通信
"""

from typing import Dict

from langchain_deepseek import ChatDeepSeek
from langchain_qwq import ChatQwQ
from langgraph.graph import MessagesState

from app.config import config
from app.logger import logger
from app.schema import LLMLocalName

# Models treated as "reasoning" models (one per provider section in the config).
# NOTE(review): not referenced in this file — presumably consumed elsewhere; confirm.
REASONING_MODELS = [config.get("llm_aliyuncs", "model"), config.get("llm_deepseek", "model")]
# Models that accept image input; used below to set LLM.support_img. Empty for now.
MULTIMODAL_MODELS = []


# Encapsulates interaction with the LLM API
class LLM:
    """Per-configuration singleton wrapper around a LangChain chat client.

    One ``LLM`` instance is cached per configuration section name, so
    repeated construction with the same ``name`` returns the same object.
    """

    # Cache of already-constructed instances, keyed by config section name.
    _instances: Dict[str, "LLM"] = {}

    def __new__(cls, name: str = LLMLocalName.LLM_ALIYUNCS):
        # Return the cached instance for this name, creating it on first use.
        # The instance is only cached after __init__ succeeds, so a failed
        # client construction is retried on the next call.
        if name not in cls._instances:
            instance = super().__new__(cls)
            instance.__init__(name)
            cls._instances[name] = instance
        return cls._instances[name]

    def __init__(self, name: str = LLMLocalName.LLM_ALIYUNCS):
        """Build the underlying chat client from the config section ``name``.

        Raises:
            Exception: re-raised (after logging) if client construction fails.
        """
        # Python calls __init__ again on the object returned by __new__;
        # skip re-initialization once the client exists.
        if hasattr(self, "client"):
            return

        llm_config = config.get_section(name)
        # Copy only the keys present in the section; absent keys leave the
        # attribute unset (a later read would raise AttributeError — the
        # config is presumably expected to contain model/max_tokens/
        # temperature/api_key/timeout at minimum; TODO confirm).
        for attr in (
            "model",
            "max_tokens",
            "temperature",
            "api_type",
            "api_key",
            "api_version",
            "base_url",
            "timeout",
        ):
            if attr in llm_config:
                setattr(self, attr, llm_config[attr])
        self.support_img = self.model in MULTIMODAL_MODELS

        params = {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "api_key": self.api_key,
            "timeout": float(self.timeout),
            "max_retries": 3,
        }

        try:
            if name == LLMLocalName.LLM_DEEPSEEK:
                self.client = ChatDeepSeek(**params)
            else:
                # Non-DeepSeek providers additionally need an explicit API base URL.
                self.client = ChatQwQ(**params, api_base=self.base_url)
        except Exception as e:
            logger.exception(e)
            raise

    def ask(self, state: MessagesState) -> dict:
        """
        Send the conversation to the LLM and return its response.

        Args:
            state (MessagesState): 对话消息序列，含system、user、assistant等全部消息

        Returns:
            dict: ``{"messages": response}`` — the model's reply wrapped in the
            shape LangGraph expects for a state update.
        """
        messages = state["messages"]
        response = self.client.invoke(messages)
        # Fix: previously annotated ``-> str`` although a dict is returned.
        return {"messages": response}
