from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.runnables import RunnableConfig

from owl_ai.domain.ai_model_entity import LLMModel
from owl_ai.domain.graph.nodes.graph_node import BaseGraphNode
from owl_ai.service.llm_model_service import LLMModelService


class ChatLLMNode(BaseGraphNode):
    """
    LLM chat node: builds a chat model from a persisted model configuration
    and, when invoked, sends the assembled prompt (system prompt plus
    optional conversation history) to the model.
    """

    @classmethod
    def init_llm(cls, llm_config: LLMModel):
        """
        Build a LangChain chat model from the persisted model configuration.

        Args:
            llm_config: model record providing ``api_type``, ``api_host``,
                ``model_name`` and — for OpenAI-compatible endpoints —
                ``api_key``.

        Returns:
            A ready-to-use chat model instance (``ChatOllama`` or
            ``ChatOpenAI``).

        Raises:
            ValueError: if ``api_type`` is not a supported backend.
        """
        api_type = llm_config.api_type
        api_host = llm_config.api_host
        model_name = llm_config.model_name

        if api_type == 'ollama':
            # Imported lazily so the optional dependency is only required
            # when this backend is actually selected.
            from langchain_ollama import ChatOllama
            return ChatOllama(
                base_url=api_host,
                model=model_name
            )
        elif api_type == 'openai':
            from langchain_openai import ChatOpenAI

            return ChatOpenAI(
                base_url=api_host,
                model_name=model_name,
                # BUG FIX: ChatOpenAI has no ``openai_key`` parameter; the
                # credential must be passed as ``api_key``, otherwise the
                # OpenAI branch cannot authenticate.
                api_key=llm_config.api_key
            )
        # Previously an unknown api_type fell through and returned None,
        # which only surfaced later as an obscure AttributeError on
        # ``invoke()``. Fail fast with an explicit error instead.
        raise ValueError(f"Unsupported api_type: {api_type!r}")

    def __init__(self, node_config: dict):
        """
        Initialize the node from its graph configuration.

        Args:
            node_config: node configuration dict; expected keys are ``id``,
                ``inputs``, ``outputs`` and ``llm`` (which itself carries
                ``model_id``, ``system_prompt`` and ``user_prompt``).
        """
        super().__init__(node_config)
        self.node_id = node_config.get("id")
        self.inputs = node_config.get("inputs")
        self.outputs = node_config.get("outputs")

        llm_config = node_config.get("llm")
        # Model id, used to look up the stored model configuration.
        model_id = llm_config.get("model_id")
        model_config = LLMModelService.find_llm_model_by_id(model_id)
        self.chat_llm = self.init_llm(model_config)
        self.system_prompt = llm_config.get("system_prompt")
        # NOTE(review): user_prompt is stored but currently unused by
        # __call__ (multimodal user-message assembly is disabled there).
        self.user_prompt = llm_config.get("user_prompt")
        self.include_history_messages = True
        # If vision is enabled, images would be sent to the model; requires
        # an LLM with a vision modality. Currently has no effect (see NOTE
        # above).
        self.vision_support = True
        self.audio_support = False

    def __call__(self, state, config: RunnableConfig):
        """
        Run the node: assemble messages and invoke the chat model.

        Args:
            state: graph state; assumed to expose ``get("node_params")``
                with a ``'__start__'`` entry holding ``"messages"`` — TODO
                confirm against the graph's state schema.
            config: LangChain runnable configuration (unused here).

        Returns:
            A partial state update ``{"node_params": {<node_id>: {"out":
            <model response>}}}``.
        """
        input_params = self.input_params_assem(state, self.inputs)

        llm_messages = []

        if self.system_prompt:
            # Placeholders in the system prompt are filled from the
            # assembled input params; raises KeyError if a placeholder is
            # missing from input_params.
            llm_messages.append(SystemMessage(self.system_prompt.format(**input_params)))

        if self.include_history_messages:
            node_params = state.get("node_params")
            history_messages = node_params.get('__start__').get("messages")
            llm_messages.extend(history_messages)

        # NOTE(review): user-prompt assembly (including vision support,
        # which attached uploaded images as base64 ``image_url`` parts) was
        # disabled here as dead code and has been removed. Supporting files
        # requires a document-extraction node before this one; re-enable by
        # building a HumanMessage from ``self.user_prompt`` and appending it
        # to ``llm_messages``.

        llm_ret = self.chat_llm.invoke(input=llm_messages)

        return {
            "node_params": {
                self.node_id: {
                    "out": llm_ret
                }
            }
        }
