import copy
import json
import time
from abc import ABC
from typing import Optional, List, Callable, Union
from datetime import datetime
from inspect import getdoc
from functools import lru_cache

from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage, AIMessage
from langchain_core.runnables import RunnableConfig, AddableDict
from langchain_openai.chat_models.base import BaseChatOpenAI
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.types import StreamWriter, Command
from redis.asyncio import Redis as AsyncRedis

from loguru import logger

from ..graph.nodes.base import BaseNodes
from ..graph.nodes import CompletionNodes
from ..graph.saver.redis_saver import AsyncRedisSaver
from ..llms import LLMConfig, create_openai_llm, default_llm_max_model_len
from ..mcps import MCPClientManager
from ..models.graph import GraphState, GraphRequest, GraphNodeParam, AgentStepLog
from ..parser.xml_parser import XmlToolResultParser
from ..prompts.template import get_prompt_template, get_prompt_context
from ..utils.str import dict_to_json_str, print_message


class BaseGraph(ABC):
    """Base class for LLM-driven agent graphs built on LangGraph.

    A subclass declares its tool ``nodes`` (and optionally prefix/suffix
    nodes); :meth:`build` wires them into a ``StateGraph`` whose central
    router is :meth:`start`.  Conversation state is checkpointed in Redis so
    interrupted runs can be resumed through :meth:`astream`.
    """

    # Class-level declarations. ``nodes`` is expected to be (re)assigned by
    # subclasses; the shared default list must never be mutated in place.
    workflow: StateGraph = None
    graph: CompiledStateGraph = None
    graph_name: str = ""
    graph_start: str = START
    graph_end: str = END
    llm_chat: BaseChatOpenAI = None
    nodes: List[Union[BaseNodes, Callable, "BaseGraph"]] = []
    verbose: bool = False
    parser = None

    def __init__(self,
                 graph_name: str,
                 llm_chat: BaseChatOpenAI | LLMConfig,
                 redis: AsyncRedis,
                 agent_template: str = None,
                 start: str = START,
                 end: str = END,
                 prefix_node: Callable | List[Callable] = None,
                 suffix_node: Callable | List[Callable] = None,
                 mcp_client: MCPClientManager = None,
                 sub_graph_is_loop: bool = False,
                 parse_tool_args: bool = False,
                 verbose: bool = False,
                 **kwargs):
        """Create an (un-compiled) agent graph.

        Args:
            graph_name: Unique prefix used to namespace every node name.
            llm_chat: A ready chat model, or an ``LLMConfig`` to build one from.
            redis: Async Redis connection used for checkpointing; required.
            agent_template: Name of the system-prompt template to render.
            start: Name of the surrounding graph's entry node (defaults to START).
            end: Name of the surrounding graph's exit node (defaults to END).
            prefix_node: Optional node(s) executed once before the first agent turn.
            suffix_node: Optional node(s) executed after the task completes.
            mcp_client: Optional MCP tool manager whose tools are listed in the prompt.
            sub_graph_is_loop: When True, nested sub-graphs route back to this
                graph's start node instead of its end.
            parse_tool_args: When True, tool parameters are post-processed by
                per-node ``args_parser`` callables.
            verbose: Print the full prompt before each LLM call.
            **kwargs: Extra options; a ``graph_config`` dict, if present, is
                merged into the runnable config on each run.

        Raises:
            ValueError: If ``redis`` is not provided.
        """
        if not redis:
            raise ValueError("Redis is required for graph execution.")
        self.llm_chat = create_openai_llm(llm_chat) if isinstance(llm_chat, LLMConfig) else llm_chat
        # An LLMConfig carries its own context-window size; otherwise fall
        # back to the project-wide default.
        self.llm_max_model_len = llm_chat.max_model_len if isinstance(llm_chat,
                                                                      LLMConfig) else default_llm_max_model_len
        self.verbose = verbose
        self.agent_template = agent_template
        self.graph_name = graph_name
        self.graph_start = start or START
        self.graph_end = end or END
        self.node_start = f"{graph_name}start"
        self.prefix_node = prefix_node
        self.suffix_node = suffix_node
        self.mcp_client = mcp_client
        self.workflow = StateGraph(GraphState)
        self.memory = AsyncRedisSaver(redis)
        self.sub_graph_is_loop = sub_graph_is_loop
        self.parse_tool_args = parse_tool_args
        self.kwargs = kwargs

    def build(self):
        """Assemble nodes and edges, then compile the runnable graph."""
        self.add_nodes()
        self.add_edges()
        self.graph = self.workflow.compile(checkpointer=self.memory)

    async def astream(self, request: GraphRequest):
        """Run the graph for ``request`` and yield streaming output.

        Yields custom step logs as they are produced and, once the run reaches
        the graph end (or the task is marked done), the final message list with
        token usage attached to the latest AI message.
        """
        if not self.graph:
            self.build()
        # A known thread_id means we are resuming an interrupted run: the new
        # question is fed through Command(resume=...) instead of a fresh message.
        if await self._exist_thread_id(request.thread_id):
            graph_input = Command(resume=request.question)
        else:
            graph_input = {"messages": [HumanMessage(content=request.question)]}
        await self._save_thread_id(request.thread_id)

        graph_config = {}
        graph_config.update((self.kwargs or {}).get("graph_config", {}))
        graph_config.update(request.__dict__)
        async for chunk in self.graph.astream(input=graph_input,
                                              stream_mode=["values", "custom"],
                                              config=graph_config):
            mode, message = chunk
            if mode == "values" and isinstance(message, AddableDict):
                goto = message.get("goto")
                is_task_done = message.get("is_task_done")
                if (goto and isinstance(goto, GraphNodeParam) and goto.type == self.graph_end) or is_task_done:
                    messages = message.get("messages", [])
                    if messages:
                        # Attach the accumulated usage stats to the most recent AI message.
                        for i in range(len(messages) - 1, -1, -1):
                            last_ai_message = messages[i]
                            if isinstance(last_ai_message, AIMessage):
                                last_ai_message.response_metadata["usage"] = [usage.to_dict() for usage in
                                                                              message.get("usages", [])]
                                break
                    yield messages
            elif mode == "custom":
                yield message

    def _end_goto(self) -> GraphNodeParam:
        """Route to the first suffix node when configured, else to the graph end."""
        if self.suffix_node:
            return GraphNodeParam(type=f"{self.graph_name}suffix_0")
        return GraphNodeParam(type=self.graph_end)

    async def start(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """Central router node: build the prompt, stream the LLM response and
        decide which node to execute next.

        Default implementation that can be overridden by subclasses if needed.
        """
        # Run prefix node(s) exactly once, before the first agent turn.
        if self.prefix_node and not state.prefix_executed:
            return {"goto": GraphNodeParam(type=f"{self.graph_name}prefix_0"), "prefix_executed": True}
        if state.is_task_done:
            return {"goto": self._end_goto(), "usages": state.usages, "loop_retry": 0, "is_task_done": False}
        # An explicit jump requested by a previous node takes precedence.
        if state.force_goto and state.force_goto.type:
            goto = GraphNodeParam(type=f"{self.graph_name}{state.force_goto.type}", params=state.force_goto.params,
                                  kwargs=state.force_goto.kwargs)
            return {"goto": goto, "force_goto": None}

        request = GraphRequest.from_runnable_config(config)
        system_template = await self.get_system_template(request, state)
        messages = [SystemMessage(content=system_template)]
        messages.extend(self.get_history_messages(request))

        state_messages = self.get_state_messages(state)
        if len(state_messages) > 1:
            messages.extend(state_messages[:-1])

        messages = self._filter_messages_by_length(messages)
        # BUGFIX: context messages were previously appended twice, duplicating
        # the context block in every prompt.
        messages.extend(self.get_context_messages(request))

        # The newest state message is this turn's question (guard against an
        # empty state message list).
        current_question = state_messages[-1].content if state_messages else ""
        user_contents = [
            self._wrap_user_input(content=current_question,
                                  question_image=f"{request.image[:100]}..." if request.image else None)
        ]
        user_contents.extend(self.get_env_messages(request))
        messages.append(HumanMessage(content="\n".join(user_contents)))

        if self.verbose:
            print_message(messages)

        last_tool_step: Optional[AgentStepLog] = None
        last_line = None

        # Built lazily so subclasses can finish registering nodes first.
        if not self.parser:
            self.parser = XmlToolResultParser(tools=self.get_nodes_schema())

        try:
            start_time = time.time()

            async for step, line in self.parser.parse_stream(self.llm_chat.astream(messages, config),
                                                             state,
                                                             self.llm_chat.model_name):
                if not step:
                    continue

                if step.type == "tool" and self.parse_tool_args:
                    # Best effort: a broken per-parameter parser must not kill the run.
                    try:
                        args_parser = self.get_nodes_args_parser(step.output.type)
                        for param_name, parser_func in args_parser.items():
                            if param_name in step.output.params:
                                step.output.params[param_name] = parser_func(step.output.params[param_name])
                    except Exception as e:
                        logger.error(f"Error parsing tool parameters: {e}")

                last_line = line
                writer(step)
                if step.type == "tool":
                    # Deep-copy so later parser mutations cannot change the routing decision.
                    last_tool_step = copy.deepcopy(step)

            logger.info(
                f"Processing time for current message '[{current_question[:50]}...]' with model [{self.llm_chat.model_name}] use: {time.time() - start_time:.2f} seconds")

            # Guard against a stream that produced no lines at all.
            messages = [AIMessage(content=last_line or "")]

            if not last_tool_step or last_tool_step.meta[
                "action"] == CompletionNodes.attempt_completion.__name__ or state.is_task_done:
                logger.info(f"Task completed. Tokens usage statistics: {state.usages}")
                goto = self._end_goto()
            else:
                goto = GraphNodeParam(type=f"{self.graph_name}{last_tool_step.output.type}",
                                      params=last_tool_step.output.params,
                                      kwargs=last_tool_step.output.kwargs)

            return {
                "messages": messages,
                "goto": goto,
                "usages": state.usages,
                "loop_retry": 0
            }
        except Exception as e:
            logger.error(f"Error in parse_stream: {e}")
            if state.loop_retry < 3:
                # Retry the whole turn from the router node.
                goto = GraphNodeParam(type=self.node_start)
            else:
                # Give up after three attempts and surface the failure to the client.
                error_msg = "Error processing your request after multiple attempts."
                writer(AgentStepLog.build_answer(chunk=error_msg, meta={"finish": True}))
                goto = self._end_goto()
            return {
                "goto": goto,
                "loop_retry": state.loop_retry + 1
            }

    def get_history_messages(self, request: GraphRequest) -> List[BaseMessage]:
        """Return the conversation history carried on the request, if any."""
        messages = []
        if hasattr(request, 'history') and request.history:
            messages.extend(request.history)
        return messages

    def get_state_messages(self, state: GraphState) -> List[BaseMessage]:
        """Return the messages accumulated in graph state."""
        return state.messages

    def get_context_messages(self, request: GraphRequest) -> List[BaseMessage]:
        """Wrap request context (if present) as a human message."""
        messages = []
        if request.context:
            messages.append(HumanMessage(content=get_prompt_context(request.context)))
        return messages

    def get_env_messages(self, request: GraphRequest):
        """Return formatted environment details (cwd, OS, open files, ...) as
        human-content strings, or an empty list when none are available."""
        messages = []
        env_details = self._wrap_environment_details(**request.kwargs)
        if env_details:
            messages.append(self._wrap_human_content(env_details))
        return messages

    def _filter_messages_by_length(self, messages: List[BaseMessage]) -> List[BaseMessage]:
        """Trim messages so their total content length fits the model context.

        System messages are always kept; the oldest non-system messages are
        dropped in human/ai pairs until the remainder fits
        ``self.llm_max_model_len``.
        """
        if not messages:
            return messages
        if sum(len(msg.content) for msg in messages) <= self.llm_max_model_len:
            return messages

        system_messages = [msg for msg in messages if isinstance(msg, SystemMessage)]
        other_messages = [msg for msg in messages if not isinstance(msg, SystemMessage)]

        system_length = sum(len(msg.content) for msg in system_messages)
        other_length = sum(len(msg.content) for msg in other_messages)

        # Drop the oldest human/ai pair per iteration; a running total avoids
        # re-summing the whole list each time (was O(n^2)).
        while other_messages and system_length + other_length > self.llm_max_model_len:
            other_length -= len(other_messages.pop(0).content)
            if other_messages:
                other_length -= len(other_messages.pop(0).content)

        return system_messages + other_messages

    async def get_system_template(self, request: GraphRequest, state=None):
        """Render the system prompt: tool descriptions, MCP tool list and any
        request/subclass-specific template variables."""
        tools_desc = []
        tools_name = []

        for name, func in self.get_nodes().items():
            # Each node's docstring doubles as its tool description.
            doc = getdoc(func) or ""
            tools_desc.append(f"## {name}\n{doc}")
            tools_name.append(name)

        kwargs = request.kwargs.copy()
        kwargs["tools_desc"] = "\n\n".join(tools_desc)
        kwargs["tools_name"] = ",".join(tools_name)

        if "cwd_dir" not in kwargs:
            kwargs["cwd_dir"] = ""

        mcp_tools = kwargs.get("mcp_tools", "")
        if self.mcp_client:
            try:
                if not self.mcp_client.is_available():
                    await self.mcp_client.initialize()
            except Exception as e:
                # BUGFIX: loguru treated the exception as a stray .format()
                # argument and silently dropped it; format it into the message.
                logger.error(f"initialize mcp server error: {e}")
            if self.mcp_client.is_available():
                mcp_tools = f"{mcp_tools}\n\n{await self.mcp_client.get_all_tools_str()}"
        kwargs["mcp_tools_prompt"] = f"""
# 可用的MCP工具列表如下：
{mcp_tools}
            """ if mcp_tools else ""
        template_name = self._get_template_name()
        self.wrap_template_kwargs(kwargs, request, state)
        return get_prompt_template(template_name, **kwargs)

    def wrap_template_kwargs(self, kwargs: dict, request: GraphRequest, state=None) -> dict:
        """Hook for subclasses to add/adjust template variables in place."""
        return kwargs

    def _get_template_name(self) -> str:
        """
        Return the template name to be used for generating the system template.
        May be overridden by subclasses.
        """
        return self.agent_template

    def get_nodes(self) -> dict:
        """Flatten ``self.nodes`` into ``{name: callable_or_subgraph}``."""
        nodes = {}
        for node in self.nodes:
            if isinstance(node, BaseNodes):
                for node_info in node.get_nodes():
                    nodes[node_info["name"]] = node_info["func"]
            elif isinstance(node, BaseGraph):
                # A nested graph is addressed by its own start node.
                nodes[node.node_start] = node
            elif callable(node):
                nodes[node.__name__] = node
        return nodes

    def get_nodes_schema(self) -> dict:
        """Collect per-node argument schema and streaming flag.

        Returns:
            dict: ``{node_name: {"schema": args_schema, "stream": bool}}``
        """
        info = {}
        for node in self.nodes:
            if isinstance(node, BaseNodes):
                for node_info in node.get_nodes():
                    entry = info.setdefault(node_info["name"], {})
                    entry["schema"] = node_info["args_schema"]
                    entry["stream"] = node_info["stream"]
            elif isinstance(node, BaseGraph):
                entry = info.setdefault(node.node_start, {})
                entry["schema"] = None  # sub-graphs handle their own arguments
                entry["stream"] = True
            elif callable(node):
                entry = info.setdefault(node.__name__, {})
                entry["schema"] = getattr(node, "args_schema", None)
                entry["stream"] = getattr(node, "stream", True)
        return info

    def get_nodes_args_parser(self, node_name: str = None) -> dict:
        """Return the ``{param_name: parser}`` mapping for ``node_name``.

        Results are memoized per instance.  (Previously this used
        ``@lru_cache`` on the method, which keys the cache on ``self`` and
        keeps every instance alive for the process lifetime.)
        """
        if not node_name:
            return {}
        cache = self.__dict__.setdefault("_args_parser_cache", {})
        if node_name not in cache:
            cache[node_name] = self._find_args_parser(node_name)
        return cache[node_name]

    def _find_args_parser(self, node_name: str) -> dict:
        """Locate the args_parser mapping for ``node_name`` in ``self.nodes``."""
        for node in self.nodes:
            if isinstance(node, BaseNodes):
                for node_info in node.get_nodes():
                    if node_info["name"] == node_name:
                        return node_info["args_parser"]
            elif callable(node) and node.__name__ == node_name:
                return getattr(node, "args_parser", {})
        return {}

    def add_nodes(self):
        """Register the start node, tool nodes, nested graphs and any
        prefix/suffix nodes on the workflow."""
        self.workflow.add_node(self.node_start, self.start)
        for node_name, node_func in self.get_nodes().items():
            if isinstance(node_func, BaseGraph):
                # Nested graphs share this workflow and add their own nodes.
                node_func.workflow = self.workflow
                node_func.add_nodes()
            else:
                self.workflow.add_node(f"{self.graph_name}{node_name}", node_func)

        if self.prefix_node:
            if isinstance(self.prefix_node, list):
                for i, node in enumerate(self.prefix_node):
                    self.workflow.add_node(f"{self.graph_name}prefix_{i}", node)
            else:
                self.workflow.add_node(f"{self.graph_name}prefix_0", self.prefix_node)

        if self.suffix_node:
            if isinstance(self.suffix_node, list):
                for i, node in enumerate(self.suffix_node):
                    self.workflow.add_node(f"{self.graph_name}suffix_{i}", node)
            else:
                self.workflow.add_node(f"{self.graph_name}suffix_0", self.suffix_node)

    def node_route(self, state: GraphState):
        """Conditional-edge router: follow ``state.goto`` or loop to start."""
        if state.goto and state.goto.type:
            return state.goto.type
        return self.node_start

    def add_edges(self, skip_start_edge: bool = False):
        """Wire tool nodes back to the start node, chain prefix/suffix nodes,
        and install the conditional router.

        Args:
            skip_start_edge: Set by a parent graph so a nested graph does not
                add its own entry edge.
        """
        if not skip_start_edge:
            self.workflow.add_edge(self.graph_start, self.node_start)
        route_nodes = []
        for node_name, node_func in self.get_nodes().items():
            if isinstance(node_func, BaseGraph):
                # A nested graph enters from our start node and, in loop mode,
                # also exits back to it.
                node_func.graph_start = self.node_start
                if self.sub_graph_is_loop:
                    node_func.graph_end = self.node_start

                node_func.add_edges(skip_start_edge=True)
                route_nodes.append(node_func.node_start)
            else:
                self.workflow.add_edge(f"{self.graph_name}{node_name}", self.node_start)
                route_nodes.append(f"{self.graph_name}{node_name}")

        route_nodes.append(self.node_start)
        route_nodes.append(self.graph_end)

        if self.prefix_node:
            route_nodes.append(f"{self.graph_name}prefix_0")
            if isinstance(self.prefix_node, list):
                # Chain prefix_0 -> prefix_1 -> ... -> start.
                for i in range(len(self.prefix_node) - 1):
                    self.workflow.add_edge(f"{self.graph_name}prefix_{i}", f"{self.graph_name}prefix_{i + 1}")
                self.workflow.add_edge(f"{self.graph_name}prefix_{len(self.prefix_node) - 1}", self.node_start)
            else:
                self.workflow.add_edge(f"{self.graph_name}prefix_0", self.node_start)

        if self.suffix_node:
            route_nodes.append(f"{self.graph_name}suffix_0")
            if isinstance(self.suffix_node, list):
                # Chain suffix_0 -> suffix_1 -> ... -> end.
                for i in range(len(self.suffix_node) - 1):
                    self.workflow.add_edge(f"{self.graph_name}suffix_{i}", f"{self.graph_name}suffix_{i + 1}")
                self.workflow.add_edge(f"{self.graph_name}suffix_{len(self.suffix_node) - 1}", self.graph_end)
            else:
                self.workflow.add_edge(f"{self.graph_name}suffix_0", self.graph_end)

        self.workflow.add_conditional_edges(self.node_start, self.node_route, route_nodes)

    def _wrap_user_input(self, content: str, **kwargs):
        """Wrap the user's question in ``<user_input>`` tags and serialize it."""
        user_input = f"""<user_input>{content}</user_input>"""
        return self._wrap_human_content(content=user_input, **kwargs)

    def _wrap_human_content(self, content: str, type: str = "text", **kwargs):
        """Serialize a human-content payload (dropping None-valued extras)."""
        human_content = {"text": content, "type": type} | {k: v for k, v in kwargs.items() if v is not None}
        return dict_to_json_str(human_content)

    def _wrap_environment_details(self, **kwargs):
        """
        Format environment details for inclusion in the prompt.

        Returns the ``<environment_details>`` block, or None when no details
        are available.
        """
        details = []
        cwd_dir = kwargs.get("cwd_dir", "")
        if "os_name" in kwargs:
            details.append(f"""Operating System: {kwargs['os_name']}""")
        if "os_shell" in kwargs:
            details.append(f"""Default Shell: {kwargs['os_shell']}""")
        if "home_dir" in kwargs:
            details.append(f"""Home Directory: {kwargs['home_dir']}""")
        if cwd_dir:
            details.append(f"""Current Working Directory: {cwd_dir}""")
        details.append(f"""# Current Time: {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}""")
        if "visible_files" in kwargs:
            details.append(f"""#VSCode Visible Files: {kwargs['visible_files']}""")
        if "open_files" in kwargs:
            details.append(f"""#VSCode Open Files: {kwargs['open_files']}""")
        if "cwd_files" in kwargs:
            details.append(f"""
#Current Working Directory ({cwd_dir}) Files:
{kwargs['cwd_files']}
(File list truncated. Use list_files on specific subdirectories if you need to explore further.)""")
        if not details:
            return None
        details = "\n".join(details)
        return f"""<environment_details>{details}</environment_details>"""

    async def _exist_thread_id(self, thread_id: str) -> bool:
        """
        Check if the thread_id exists in memory to determine if this is a resume or new question.
        Returns True if thread_id exists (resume), False otherwise (new question).
        """
        key = f"thread_id${thread_id}"
        return await self.memory.exists(key)

    async def _save_thread_id(self, thread_id: str):
        """Save thread_id to Redis for future resume checks (6-hour TTL)."""
        key = f"thread_id${thread_id}"
        await self.memory.set(key, f"{datetime.now()}", expire_seconds=6 * 60 * 60)
