from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import add_messages, StateGraph
from typing import Annotated, Dict, Any, TypedDict, Callable

from owl_ai.domain.agent_config_entity import AgentConfigEntity
from owl_ai.service.graph.graph_compile import GraphCompile
from owl_ai.service.rag.rag_service import RAGEmbeddingService


def chat_llm_generate(config: dict):
    """
    Build a chat LLM client from a node configuration dict.

    Args:
        config: node configuration; reads "type" ("ollama" or "openai"),
            "baseUrl" and "modelName".

    Returns:
        A configured chat model instance, or None when "type" is not a
        supported value.
    """
    llm_type = config.get("type")
    base_url = config.get("baseUrl")
    model_name = config.get("modelName")
    # NOTE(review): config may carry a "stream" flag, but it was never used;
    # streaming is currently hard-coded on (disable_streaming=False) — confirm intent.

    chat_llm = None
    if llm_type == "ollama":
        # Lazy import so the provider package is only required when selected.
        from langchain_ollama import ChatOllama
        chat_llm = ChatOllama(
            base_url=base_url,
            model=model_name,
            disable_streaming=False,
            # Context window / generation caps — tuned values, confirm for your models.
            num_ctx=16384,
            num_predict=2048
        )
    elif llm_type == "openai":
        from langchain_openai import ChatOpenAI
        chat_llm = ChatOpenAI(
            base_url=base_url,
            # "model" is the canonical parameter; "model_name" is a legacy alias.
            model=model_name,
            disable_streaming=False
        )
    return chat_llm


def update_node_params(state: Dict[str, Any], node_output: Any) -> Dict[str, Any]:
    """
    Custom reducer for the ``node_params`` state field.

    Merges a node's output mapping into the accumulated per-node
    parameters. Returns a NEW dict instead of mutating the incoming
    state in place, which is safer for LangGraph reducers since the
    framework may hold references to previous state objects.

    Args:
        state: current accumulated node_params (None on first invocation).
        node_output: the latest node's output mapping to merge in.

    Returns:
        The merged node_params dict.
    """
    current = state or {}
    if not node_output:
        return current
    # Shallow merge; later node output wins on key collisions,
    # matching the previous dict.update semantics.
    return {**current, **node_output}


class WorkFlowState(TypedDict):
    """
    Shared workflow state passed between graph nodes.
    """
    # Conversation history; merged with LangGraph's add_messages reducer.
    messages: Annotated[list, add_messages]
    # Per-node outputs keyed by node name; merged via update_node_params.
    node_params: Annotated[dict, update_node_params]


def params_assemble(input_args: dict, state: "WorkFlowState"):
    """
    Resolve a node's input configuration into concrete parameter values.

    Each entry is either a literal (its "value" is used as-is) or a
    reference ("type" == "ref", "value" formatted as
    "nodeName.fieldName") resolved against the outputs accumulated in
    ``state["node_params"]``.

    Args:
        input_args: mapping of param name -> {"type": ..., "value": ...}.
        state: workflow state holding previous node outputs.

    Returns:
        dict of resolved parameter values.
    """
    params = {}
    for key, spec in input_args.items():
        if spec.get("type") == "ref":
            # Split only on the first dot so field names that themselves
            # contain dots still resolve correctly.
            ref_node, ref_field = spec.get("value").split(".", 1)
            params[key] = state.get("node_params").get(ref_node).get(ref_field)
        else:
            params[key] = spec.get("value")
    return params


class ChatLLMNode(Callable):
    """
    Chat LLM workflow node.

    Resolves the configured inputs against workflow state, formats the
    system/user prompts, optionally attaches uploaded image files, and
    invokes the chat model.
    """

    def __init__(self, node_config: dict):
        self.chat_llm = chat_llm_generate(node_config)
        self.node_name = node_config.get("nodeName")
        self.system_prompt = node_config.get("systemPrompt")
        self.user_prompt = node_config.get("userPrompt", None)
        self.input = node_config.get("input")
        self.output = node_config.get("output")
        # Whether to forward workflow input files to the model; reads the
        # optional "files" config key, defaulting to True to preserve the
        # previous hard-coded behavior.
        self.files = node_config.get("files", True)

    def params_assemble(self, state: WorkFlowState):
        """
        Resolve this node's input config into concrete values.

        "ref" entries ("nodeName.fieldName") are looked up in
        ``state["node_params"]``; everything else is used literally.
        """
        params = {}
        for key, spec in self.input.items():
            if spec.get("type") == "ref":
                # Split on the first dot only, so field names containing
                # dots still resolve.
                ref_node, ref_field = spec.get("value").split(".", 1)
                params[key] = state.get("node_params").get(ref_node).get(ref_field)
            else:
                params[key] = spec.get("value")
        return params

    def __call__(self, state: WorkFlowState, config: RunnableConfig):
        """
        Run the node and return a partial state update under
        ``node_params[node_name]["out"]``.
        """
        # Assemble prompt inputs from the configured "input" mapping.
        input_params = self.params_assemble(state)
        system_prompt = self.system_prompt.format(**input_params)
        llm_messages = [SystemMessage(content=system_prompt)]

        if self.user_prompt:
            user_prompt = self.user_prompt.format(**input_params)

            # Files uploaded at workflow start, if any. Guarded lookups so
            # a missing "__start__" entry no longer raises AttributeError.
            start_params = state.get("node_params", {}).get("__start__") or {}
            input_files = start_params.get("files")
            user_messages = [{
                "type": "text",
                "text": user_prompt
            }]

            if self.files and input_files:
                for input_file in input_files:
                    # Only base64-encoded images are forwarded for now.
                    if input_file.get("type") == "image" and input_file.get("contentType") == "base64":
                        user_messages.append({
                            "type": "image_url",
                            "image_url": {
                                "url": input_file.get("content"),
                            }
                        })

            llm_messages.append(HumanMessage(content=user_messages))
        ret = self.chat_llm.invoke(llm_messages)

        return {
            "node_params": {
                self.node_name: {
                    "out": ret
                }
            }
        }


class RAGRetrievalNode(Callable):
    """
    Knowledge-base retrieval node.

    Resolves the query from workflow state and searches each configured
    knowledge base, returning the matched document contents.
    """

    def __init__(self, node_config: dict):
        self.node_name = node_config.get("nodeName")
        # Knowledge-base id list from config. The previous code had the
        # config read commented out and hard-coded ["test"]; keep ["test"]
        # only as a fallback when the config supplies nothing.
        # NOTE(review): confirm the fallback can be dropped once config
        # always provides "rag".
        self.rag = node_config.get("rag") or ["test"]
        self.input = node_config.get("input")

    def __call__(self, state: WorkFlowState, config: RunnableConfig):
        """
        Search every knowledge base for each keyword; return results
        under ``node_params[node_name]["searchResult"]``.
        """
        input_params = params_assemble(self.input, state)
        # TODO: use an LLM to expand the query into retrieval keywords;
        # for now the raw query is the single keyword.
        keywords = [input_params.get("query")]

        rag_out = []
        for rag_id in self.rag:
            for keyword in keywords:
                documents = RAGEmbeddingService.document_embedding_search(rag_id, keyword)
                rag_out.extend({"content": doc.page_content} for doc in documents)

        return {
            "node_params": {
                self.node_name: {
                    "searchResult": rag_out
                }
            }
        }


class IFBranchRouter(Callable):
    """
    IF-branch routing node: evaluates its configured condition rules in
    order and returns the target node name of the first rule that
    matches. (IF is really a conditional-branch dispatcher.)
    """

    def __init__(self, node_config: dict):
        self.node_name = node_config.get("nodeName")
        self.conditions = node_config.get("conditions")

    def __call__(self, state: WorkFlowState, config: RunnableConfig):
        # NOTE(review): "key" is compared directly against "value" from
        # the same rule (state is not consulted) — confirm that is the
        # intended condition schema.
        for rule in self.conditions:
            lhs = rule.get("key")
            op = rule.get("cond")
            rhs = rule.get("value")
            matched = (op == "eq" and lhs == rhs) or (op == "contains" and rhs in lhs)
            if matched:
                return rule.get("nextNode")

        # No rule matched: fall back to the first rule's target.
        return self.conditions[0].get("nextNode")


class WorkFlowGraphCompile(GraphCompile):
    """
    Workflow graph compiler. A workflow contains several kinds of task
    node, so compilation dispatches on each node's type.
    """

    @classmethod
    def compile(cls, agent_config: AgentConfigEntity):
        """
        Compile the agent configuration into an executable LangGraph.

        Args:
            agent_config: per-node configuration list; each entry looks like: [
                "nodeName": "node name",
                "nodeType": "node type, e.g. LLM, rag_retrieval",
                "input": {
                    "key": {
                        "type": "input type, e.g. str, list, ref",
                        "value": "input value, e.g. 'Hello World'; for ref this references another node's field, formatted 'nodeName.fieldName', e.g. 'rag_retrieval.result'"
                    }
                },
                "output": {
                    "key": {
                        "type": "output type, e.g. str, list, ref",
                        "value": "output value, e.g. 'Hello World'; for ref this references another node's field, formatted 'nodeName.fieldName', e.g. 'rag_retrieval.result'"
                    }
                },
                "next": [
                    "next node name(s); multiple entries mean parallel execution, e.g. ['nodeName1', 'nodeName2']",
                ]
            ]

        Returns:
            The compiled graph.
        """
        graph = StateGraph(WorkFlowState)
        config = agent_config.config

        node_config_dict = {}
        # First pass: register each node by its type.
        for node_config in config:
            node_name = node_config.get("nodeName")
            node_type = node_config.get("nodeType")

            node_config_dict.setdefault(node_name, node_config)
            if node_type == "LLM":
                graph.add_node(node_name, ChatLLMNode(node_config))
            elif node_type == "rag_retrieval":
                graph.add_node(node_name, RAGRetrievalNode(node_config))

        # Second pass: wire edges once every node exists.
        for node_config in config:
            node_name = node_config.get("nodeName")
            next_nodes = node_config.get("next")
            if not next_nodes:
                continue
            for next_node in next_nodes:
                next_node_config = node_config_dict.get(next_node)
                next_node_type = next_node_config.get("nodeType") if next_node_config else None

                if next_node_type == "IF_BRANCH":
                    # Branch nodes become conditional edges. Bug fix: the
                    # router must receive the branch node's own config dict
                    # (with its "conditions"), not the whole workflow
                    # config list that was passed before.
                    graph.add_conditional_edges(source=node_name, path=IFBranchRouter(next_node_config))
                else:
                    graph.add_edge(node_name, next_node)

        return graph.compile()
