import copy
import time
import asyncio
from abc import ABC
from typing import Optional, List, Callable, Union
from datetime import datetime
from inspect import getdoc
from functools import lru_cache
from loguru import logger

from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage, AIMessage
from langchain_core.runnables import RunnableConfig, AddableDict
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.types import StreamWriter, Command
from redis.asyncio import Redis as AsyncRedis

from .nodes.system import SystemNodes
from .saver import default_memory_saver
from ..graph.nodes.base import BaseNodes
from ..graph.nodes import CompletionNodes
from ..graph.saver.redis_saver import AsyncRedisSaver
from ..llms import LLMConfig, InsCodeModel
from ..mcps import MCPClientManager
from ..models.graph import AgentStepLogType, GraphState, GraphRequest, GraphNodeParam, AgentStepLog
from ..parser.xml_parser import XmlToolResultParser
from ..prompts.template import get_prompt_template, get_prompt_context
from ..config import get_env_config
from ..config.fuse import get_langfuse_callback
from ..config.agents import LLMType, AgentActions, SELECTED_MCP_SERVERS
from ..utils.redis import AsyncRedisClient
from ..utils.str import dict_to_json_str, print_message
from ..utils.tokens import count_tokens


class BaseGraph(ABC):
    # Base class for agent graphs: wires node functions into a shared LangGraph
    # StateGraph, compiles it with a checkpointer (Redis-backed or in-memory)
    # and drives the start/tool routing loop in `start()`.
    # NOTE(review): intentionally no class docstring — getdoc() is called on
    # sub-graph *instances* when building the system prompt (see
    # get_system_template), and an inherited docstring would leak into the
    # toolkit descriptions of subclasses that define none of their own.
    workflow: StateGraph = None          # graph under construction (replaced in __init__)
    graph: CompiledStateGraph = None     # compiled graph, built lazily by build()
    graph_name: str = ""                 # prefix used to namespace node names
    graph_start: str = START             # entry edge source
    graph_end: str = END                 # where finished runs are routed
    node_start: str = None               # name of this graph's start node
    llm_chat: InsCodeModel = None        # chat model wrapper
    # NOTE(review): class-level mutable default; always reassigned per-instance
    # in __init__, so cross-instance sharing is avoided in practice.
    nodes: List[Union[BaseNodes, Callable, "BaseGraph"]] = []
    parser = None                        # lazily created XmlToolResultParser

    def __init__(self,
                 graph_name: str,
                 nodes: List[Union[BaseNodes, Callable, "BaseGraph"]] = None,
                 llm_chat: LLMConfig | InsCodeModel = None,
                 redis: AsyncRedis = None,
                 agent_template: str = None,
                 agent_template_language: str = None,
                 start: str = START,
                 end: str = END,
                 prefix_node: Callable | List[Callable] = None,
                 suffix_node: Callable | List[Callable] = None,
                 mcp_client: MCPClientManager = None,
                 sub_graph_is_loop: bool = False,
                 parse_tool_args: bool = False,
                 recursion_limit: int = 25,
                 **kwargs):
        """
        Initialize an agent graph.

        Args:
            graph_name: Unique name; used as a prefix to namespace node names.
            nodes: Tool containers (BaseNodes), callables or sub-graphs to
                register. Default changed from a shared mutable ``[]`` to
                ``None`` (same behaviour; each instance gets a fresh list).
            llm_chat: Chat model config or prebuilt model; falls back to the
                configured LLM_CHAT type.
            redis: Optional Redis connection for checkpointing.
            agent_template: Prompt template name for this agent.
            agent_template_language: Prompt template language.
            start: Graph entry node name (defaults to START).
            end: Graph exit node name (defaults to END).
            prefix_node: Node(s) executed once before the main loop.
            suffix_node: Node(s) executed after the task completes.
            mcp_client: Optional MCP tool client manager.
            sub_graph_is_loop: Route sub-graph ends back to this graph's start.
            parse_tool_args: Apply per-parameter parser functions to tool args.
            recursion_limit: Max loop steps; non-positive falls back to 25.
            **kwargs: Extra options (e.g. ``graph_config``) used by astream.

        Raises:
            ValueError: If the LLM chat model cannot be initialized.
        """
        # Redis is optional: fall back to in-memory checkpointing when it is
        # not configured or unreachable.
        try:
            redis_config = get_env_config("redis")
            if redis:
                self.redis = redis
            elif redis_config:
                self.redis = AsyncRedisClient(**redis_config).redis
            else:
                self.redis = None
        except Exception as e:
            logger.warning(f"Failed to connect to Redis: {e}. Falling back to memory storage.")
            self.redis = None

        # BUG FIX: the parameter default used to be a mutable `[]` shared by
        # every call site; normalize None to a fresh list per instance.
        self.nodes = nodes if nodes is not None else []

        self.llm_chat = InsCodeModel.build(llm_chat if llm_chat else LLMConfig.build_from_type(LLMType.LLM_CHAT))
        if not self.llm_chat:
            raise ValueError(
                "Failed to initialize LLM chat model. Please provide a valid LLMConfig or InsCodeModel instance")

        self.agent_template = agent_template
        self.agent_template_language = agent_template_language
        self.graph_name = graph_name
        self.graph_start = start or START
        self.graph_end = end or END
        # Node names are namespaced with graph_name so several graphs can
        # share one workflow without collisions.
        self.node_start = f"{graph_name}start"
        self.prefix_node = prefix_node
        self.prefix_node_name = f"{graph_name}prefix_0"
        self.suffix_node = suffix_node
        self.suffix_node_name = f"{graph_name}suffix_0"
        self.mcp_client = mcp_client
        self.workflow = StateGraph(GraphState)
        self.sub_graph_is_loop = sub_graph_is_loop
        self.parse_tool_args = parse_tool_args
        self.kwargs = kwargs
        # Guard against non-positive limits.
        self.default_recursion_limit = recursion_limit if recursion_limit > 0 else 25
        # Checkpointer: Redis-backed when available, otherwise in-memory.
        if self.redis:
            self.memory = AsyncRedisSaver(self.redis)
        else:
            self.memory = default_memory_saver

    def build(self, verbose: bool = True):
        """Assemble nodes and edges, then compile the workflow.

        When a Langfuse callback is configured and *verbose* is on, the
        compiled graph is wrapped with that callback for tracing.
        """
        self.add_nodes()
        self.add_edges()
        compiled = self.workflow.compile(checkpointer=self.memory)
        callback = get_langfuse_callback()
        if callback and verbose:
            compiled = compiled.with_config({"callbacks": [callback]})
        self.graph = compiled

    async def astream(self, request: GraphRequest):
        """Run the graph for *request* and yield streamed output.

        Yields, in order: custom AgentStepLog events written by nodes, a
        final usage/timing AgentStepLog once the task is detected as done,
        and finally the accumulated message list.
        """
        if not self.graph:
            self.build(verbose=request.verbose)
        # Resume an interrupted run for a known thread id, otherwise start a
        # fresh conversation turn. NOTE(review): `input` shadows the builtin.
        exist_thread_id = await self._exist_thread_id(request.thread_id)
        if exist_thread_id:
            input = Command(resume=request.question)
        else:
            input = {"messages": [HumanMessage(content=request.question, additional_kwargs={"is_question": True})]}
        await self._save_thread_id(request.thread_id)

        # Merge graph_config supplied at construction with per-request fields;
        # request fields win on key collisions.
        graph_config = {}
        graph_config.update((self.kwargs or {}).get("graph_config", {}))
        graph_config.update(request.__dict__)
        if "recursion_limit" not in graph_config:
            graph_config["recursion_limit"] = self.default_recursion_limit
        async for chunk in self.graph.astream(input=input,
                                              stream_mode=["values", "custom"],
                                              config=graph_config):
            mode, message = chunk
            if mode == "values" and isinstance(message, AddableDict):
                goto = message.get("goto")
                is_task_done = message.get("is_task_done")
                # A state snapshot counts as final when it routes to the graph
                # end or is explicitly flagged done.
                if (goto and isinstance(goto, GraphNodeParam) and goto.type == self.graph_end) or is_task_done:
                    total_steps = message.get("recursion_steps", 0)
                    total_time = message.get("total_time", 0)
                    start_time = message.get('start_time', int((time.time() * 1000)))
                    # Wall-clock milliseconds for the whole task.
                    task_total_time = int(time.time() * 1000) - start_time
                    messages = message.get("messages", [])
                    if messages:
                        # Walk backwards to the last AI message and attach
                        # usage/timing metadata to it before yielding.
                        for i in range(len(messages) - 1, -1, -1):
                            last_ai_message = messages[i]
                            if isinstance(last_ai_message, AIMessage):
                                usage = [usage.to_dict() for usage in message.get("usages", [])]
                                task_info = {
                                    "recursion_steps": total_steps,
                                    "total_time": total_time,
                                    'task_total_time': task_total_time
                                }
                                last_ai_message.response_metadata["usage"] = usage
                                last_ai_message.response_metadata["task"] = task_info
                                logger.info(
                                    f"Task completed - Steps: {total_steps}, Task time: {task_total_time / 1000:.2f}s, AI time: {total_time / 1000:.2f}s, Token usage: {usage}")
                                yield AgentStepLog.build_usage_action(finish=True, usage=usage,
                                                                      meta={"task": task_info})
                                break
                    yield messages
            elif mode == "custom":
                # Custom-mode chunks are AgentStepLog events written by nodes.
                yield message

    async def start(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """
        Default implementation of the start method that can be overridden by subclasses if needed.

        One turn of the agent loop:
          1. enforce the recursion limit;
          2. handle prefix-node execution, task completion and forced routing;
          3. assemble the prompt (system template + history + prior state
             messages + context + wrapped current question + environment
             details) and trim it to the model's context window;
          4. stream the LLM answer through the XML tool parser, emitting
             AgentStepLog events via ``writer``;
          5. choose the next node ("goto") from the last parsed tool step.

        Returns a partial GraphState update dict (always contains "goto" and,
        except on recursion-limit abort, "recursion_steps").
        """
        request = GraphRequest.from_runnable_config(config)
        recursion_limit = config.get("recursion_limit", self.default_recursion_limit)

        # Step budget exhausted: notify the client and leave via suffix/end.
        if state.recursion_steps > recursion_limit:
            error_msg = f"⚠️ 执行步骤超过限制已中断，请继续提问"
            logger.warning(f"{error_msg}, recursion_limit: {recursion_limit}")
            action = AgentActions.AGENT_STOP.value
            writer(AgentStepLog.build_tool_action(action=action, output=error_msg, meta={"finish": True}))
            goto = GraphNodeParam(type=self.graph_end) if not self.suffix_node else GraphNodeParam(
                type=self.suffix_node_name)
            return {"messages": [AIMessage(content=error_msg)], "goto": goto}

        # Run the prefix node exactly once, before the first LLM call.
        if self.prefix_node and not state.prefix_executed:
            return {"goto": GraphNodeParam(type=self.prefix_node_name), "prefix_executed": True,
                    "recursion_steps": state.recursion_steps + 1}

        # Task finished: hand off to the suffix node (if any) or the graph end.
        if state.is_task_done:
            goto = GraphNodeParam(type=self.graph_end) if not self.suffix_node else GraphNodeParam(
                type=self.suffix_node_name)
            return {"goto": goto, "usages": state.usages, "loop_retry": 0, "is_task_done": False,
                    "recursion_steps": state.recursion_steps + 1}
        # An explicit routing request from a previous node takes precedence
        # over normal LLM-driven routing.
        if state.force_goto and state.force_goto.type:
            goto = GraphNodeParam(type=f"{self.graph_name}{state.force_goto.type}", params=state.force_goto.params,
                                  kwargs=state.force_goto.kwargs)
            return {"goto": goto, "force_goto": None, "recursion_steps": state.recursion_steps + 1}
        # --- prompt assembly ---
        system_template = await self.get_system_template(request=request, state=state,
                                                         template_type=state.template_type)
        # cache_control marks the system prompt for provider-side prompt caching.
        messages = [SystemMessage(content=[
            {
                "text": system_template,
                "type": "text",
                "cache_control": {"type": "ephemeral"},
            }
        ])]
        messages.extend(self.get_history_messages(request, state))

        state_messages = self.get_state_messages(state, request)
        # All but the last state message go into the prompt as-is; the last
        # one is treated as the current question and wrapped below.
        if len(state_messages) > 1:
            messages.extend(state_messages[:-1])

        messages.extend(self.get_context_messages(request))

        # NOTE(review): assumes state always carries at least one message
        # (the incoming question) — IndexError otherwise; confirm upstream.
        current_question = state_messages[-1].content
        user_contents = []
        user_contents.append(
            self._wrap_user_input(content=current_question,
                                  question_image=f"{request.image[:100]}..." if request.image else None))

        user_contents.extend(self.get_env_messages(request))

        messages.append(HumanMessage(content="\n".join(user_contents)))
        messages = self._filter_messages_by_length(messages)

        if request.verbose:
            print_message(messages)

        last_tool_step: AgentStepLog = None
        last_line = None

        # Parser is built lazily so it reflects the current node schemas.
        if not self.parser:
            self.parser = XmlToolResultParser(tools=self.get_nodes_schema())

        try:
            start_time = time.time()
            first_response_time = None

            response_id = None
            # Stream the model output through the XML parser; each parsed step
            # is forwarded to the client via writer() unless it belongs to a
            # sub-agent or to a server-executed tool.
            async for step, line in self.parser.parse_stream(self.llm_chat.chat_model.astream(messages, config),
                                                             state,
                                                             self.llm_chat.model_name):
                if not step:
                    continue

                if first_response_time is None:
                    first_response_time = time.time()

                # Capture the response id for logging/tracing.
                if hasattr(step, 'response_id'):
                    response_id = step.response_id

                # Optionally run per-parameter parser functions on tool args.
                if step.type == AgentStepLogType.TOOL.value and self.parse_tool_args:
                    try:
                        args_parser = self.get_nodes_args_parser(step.output.type)
                        for param_name, parser_func in args_parser.items():
                            if param_name in step.output.params:
                                step.output.params[param_name] = parser_func(step.output.params[param_name])
                    except Exception as e:
                        logger.error(f"Error parsing tool parameters: {e}")

                last_line = line
                if (step.type != AgentStepLogType.SUBAGENT.value and
                    not (step.type == AgentStepLogType.TOOL.value and
                         self.get_nodes_schema().get(step.output.type, {}).get("execution_mode") == "server")):
                    writer(step)

                if step.type == AgentStepLogType.TOOL.value:
                    last_tool_step = copy.deepcopy(step)

            # --- timing ---
            current_step_time = int((time.time() - start_time) * 1000)
            total_time = state.total_time + current_step_time
            first_response_delay = first_response_time - start_time if first_response_time else 0
            logger.info(
                f"Processing time for current message '[{current_question[:50]}...]' response_id: {response_id} with model [{self.llm_chat.model_name}] use: {current_step_time / 1000:.2f} seconds (first response: {first_response_delay:.2f}s)")

            messages = [AIMessage(content=last_line)]

            # Tools the model may call, excluding the completion marker.
            available_tools = [tool_name for tool_name in self.get_nodes_schema().keys()
                               if tool_name != CompletionNodes.attempt_completion.__name__]

            # Whether any prior AI message in this turn already invoked a tool.
            state_ai_messages_have_tools = any(
                isinstance(msg, AIMessage) and msg.content and
                any(tool_name in msg.content for tool_name in available_tools)
                for msg in state_messages
            )

            # --- routing decision ---
            loop_retry = 0
            if not last_tool_step and not last_line and state.loop_retry < 3:
                # Empty response: nudge the model and retry (max 3 attempts).
                logger.info(f"No tool call or answer found, returning to start node, loop_retry: {state.loop_retry}")
                messages.append(HumanMessage(content="请继续回答或者总结会话"))
                goto = GraphNodeParam(type=self.node_start)
                loop_retry = state.loop_retry + 1
            elif not last_tool_step and available_tools and not state_ai_messages_have_tools and state.loop_retry < 3:
                # Answered without ever using a tool although tools exist:
                # request a tool call and retry after a short backoff.
                logger.info(f"No tool used but tools available, requesting tool usage, loop_retry: {state.loop_retry}")
                content = "Success .继续调用相应的XML格式工具来完成操作。"
                messages.append(HumanMessage(content=content))
                goto = GraphNodeParam(type=self.node_start)
                loop_retry = state.loop_retry + 1
                await asyncio.sleep(2)
            elif not last_tool_step or last_tool_step.meta[
                "action"] == CompletionNodes.attempt_completion.__name__ or state.is_task_done:
                # Plain answer or explicit completion: leave via suffix/end.
                goto = GraphNodeParam(type=self.graph_end) if not self.suffix_node else GraphNodeParam(
                    type=self.suffix_node_name)
            elif last_tool_step.type == AgentStepLogType.SUBAGENT.value:
                # Delegate to a sub-agent graph, addressed by its node name.
                goto = GraphNodeParam(type=last_tool_step.output.params["name"],
                                      params=last_tool_step.output.params,
                                      kwargs=last_tool_step.output.kwargs)
            else:
                # Regular tool call: route to the namespaced tool node.
                goto = GraphNodeParam(type=f"{self.graph_name}{last_tool_step.output.type}",
                                      params=last_tool_step.output.params,
                                      kwargs=last_tool_step.output.kwargs)

            return {
                "messages": messages,
                "goto": goto,
                "usages": state.usages,
                "loop_retry": loop_retry,
                "total_time": total_time,
                "start_time": state.start_time,
                "recursion_steps": state.recursion_steps + 1
            }
        except Exception as e:
            logger.error(f"Error in parse_stream: {e}")
            # After three failed attempts surface an error to the client;
            # otherwise back off briefly and retry from the start node.
            if state.loop_retry >= 3:
                error_msg = "❌ 多次尝试处理请求失败，请尝试切换模型后重试。"
                writer(AgentStepLog.build_answer(chunk=error_msg, meta={"finish": True, "error": True}))
            if state.loop_retry < 3:
                await asyncio.sleep(2)
                goto = GraphNodeParam(type=self.node_start)
            else:
                goto = GraphNodeParam(type=self.graph_end) if not self.suffix_node else GraphNodeParam(
                    type=self.suffix_node_name)
            return {
                "goto": goto,
                "loop_retry": state.loop_retry + 1,
                "recursion_steps": state.recursion_steps + 1
            }

    def get_history_messages(self, request: GraphRequest, state: GraphState = None) -> List[BaseMessage]:
        if not request.history:
            return []

        # 如果state中有AI消息且不包含launch_container，说明已在正式回答，不返回历史消息
        if state and state.messages:
            for msg in state.messages:
                if (isinstance(msg, AIMessage) and msg.content and
                    "launch_container" not in str(msg.content)):
                    return []

        return list(request.history)

    def get_state_messages(self, state: GraphState, request: GraphRequest = None) -> List[BaseMessage]:
        messages = copy.deepcopy(state.messages)

        for msg in messages:
            if isinstance(msg, HumanMessage) and msg.additional_kwargs and msg.additional_kwargs.get("is_question",
                                                                                                     False):
                original_content = self._get_message_content(msg)
                reminder_parts = ["## 🚨 重要提醒：当前用户问题如下，必须严格针对当前问题进行回答，禁止偏离主题："]
                if request and request.image:
                    reminder_parts.append("📷 用户上传了一张图片，请结合图片内容进行回答。")
                reminder_parts.append(f"""<user_current_question>
{original_content}
</user_current_question>""")
                modified_content = "\n".join(reminder_parts)
                self._set_message_content(msg, modified_content)

        return messages

    def get_context_messages(self, request: GraphRequest) -> List[BaseMessage]:
        messages = []
        if request.context:
            messages.append(HumanMessage(content=get_prompt_context(request.context)))
        return messages

    def get_env_messages(self, request: GraphRequest):
        """Return environment-detail strings derived from the request kwargs
        (empty list when there are none)."""
        details = self._wrap_environment_details(**request.kwargs)
        return [details] if details else []

    def _get_message_content(self, msg: BaseMessage) -> str:
        """
        获取消息的文本内容，处理不同类型的content格式
        """
        if isinstance(msg.content, str):
            return msg.content
        elif isinstance(msg.content, list):
            text_parts = []
            for item in msg.content:
                if isinstance(item, dict):
                    if item.get("type") == "text" and "text" in item:
                        text_parts.append(item["text"])
                    elif isinstance(item.get("text"), str):
                        text_parts.append(item["text"])
                elif isinstance(item, str):
                    text_parts.append(item)
            return "\n".join(text_parts)
        else:
            return str(msg.content) if msg.content else ""

    def _set_message_content(self, msg: BaseMessage, content: str):
        """
        设置消息的内容，保持原有的content格式
        """
        if isinstance(msg.content, str):
            msg.content = content
        elif isinstance(msg.content, list):
            # 对于list格式，更新第一个text类型的项目
            for item in msg.content:
                if isinstance(item, dict) and item.get("type") == "text" and "text" in item:
                    item["text"] = content
                    break
                elif isinstance(item, dict) and isinstance(item.get("text"), str):
                    item["text"] = content
                    break
            else:
                # 如果没有找到text项目，添加一个新的
                msg.content.insert(0, {"type": "text", "text": content})
        else:
            msg.content = content

    def _filter_messages_by_length(self, messages: List[BaseMessage]) -> List[BaseMessage]:
        """
        Trim the message list so its total token count stays below 90% of the
        model's maximum context length.

        Strategy:
        1. Keep the leading segment up to and including the first
           SystemMessage untouched.
        2. Everything after it is treated as "other_messages".
        3. Truncate the content of AI messages older than the last two
           (first 300 chars + ellipsis marker + last 200 chars).
        4. If still over budget, apply the same truncation to Human messages
           older than the last two.
        5. If still over budget, shrink the remaining messages one by one.
        """
        if not messages:
            return messages

        # Cap at 90% of the context window to leave a safety margin.
        max_allowed_length = int(self.llm_chat.max_model_len * 0.9)

        def _calculate_total_tokens(system_msgs: List[BaseMessage], other_msgs: List[BaseMessage]) -> int:
            """Total token count across the system and other messages."""
            system_tokens = sum(count_tokens(self._get_message_content(msg)) for msg in system_msgs)
            other_tokens = sum(count_tokens(self._get_message_content(msg)) for msg in other_msgs)
            return system_tokens + other_tokens

        first_system_index = -1
        for i, msg in enumerate(messages):
            if isinstance(msg, SystemMessage):
                first_system_index = i
                break

        system_messages = []
        other_messages = messages

        if first_system_index != -1:
            system_messages = messages[:first_system_index + 1]
            other_messages = messages[first_system_index + 1:]

        def _truncate_content(content: str, front: int = 300, back: int = 200) -> str:
            """Keep the head and tail of *content* with an ellipsis marker between."""
            if len(content) <= front + back + 10:
                return content
            return content[:front] + "\n\n......(文件内容已完全生成，截取显示部分内容)......\n\n" + content[-back:]

        def _truncate_messages_by_type(msg_type, keep_last: int = 2):
            """Truncate every message of *msg_type* except the last *keep_last*."""
            indices = [i for i, msg in enumerate(other_messages) if isinstance(msg, msg_type)]
            if len(indices) > keep_last:
                for i in indices[:-keep_last]:
                    original_content = self._get_message_content(other_messages[i])
                    truncated_content = _truncate_content(original_content)
                    self._set_message_content(other_messages[i], truncated_content)

        _truncate_messages_by_type(AIMessage)

        if _calculate_total_tokens(system_messages, other_messages) > max_allowed_length:
            _truncate_messages_by_type(HumanMessage)

        # Last resort: shrink messages front-to-back until within budget.
        if _calculate_total_tokens(system_messages, other_messages) > max_allowed_length:
            for i in range(len(other_messages)):
                current_tokens = _calculate_total_tokens(system_messages, other_messages)
                if current_tokens <= max_allowed_length:
                    break

                excess_tokens = current_tokens - max_allowed_length
                original_content = self._get_message_content(other_messages[i])
                target_tokens = max(1000, count_tokens(original_content) - excess_tokens - 100)
                half_tokens = target_tokens // 2

                # NOTE(review): half_tokens is a *token* count but
                # _truncate_content slices by *characters* — an approximation;
                # confirm this over-/under-trimming is acceptable.
                truncated_content = _truncate_content(original_content, front=half_tokens, back=half_tokens)
                self._set_message_content(other_messages[i], truncated_content)

        return system_messages + other_messages

    async def get_system_template(self, request: GraphRequest, state: GraphState = None, template_type: str = None):
        """
        获取系统模板

        Args:
            request: 图请求对象
            state: 状态对象
            template_type: 模板类型

        Returns:
            生成的系统模板字符串
        """
        tools_desc = []
        graphs_desc = []
        tools_name = []

        nodes = self.get_nodes()
        for name, func in nodes.items():
            doc = getdoc(func) or ""
            tools_name.append(name)

            if isinstance(func, BaseGraph):
                graphs_desc.append(f"subagent_toolkit name: `{name}`\n{doc}")
            else:
                tools_desc.append(f"tool name: `{name}`\n{doc}")

        tools_desc_parts = []
        if tools_desc:
            tools_desc = "\n\n".join([f"{i}. {desc}" for i, desc in enumerate(tools_desc, 1)])
            tools_desc_parts.append(f"""
## 可用的工具列表如下：
{tools_desc}
""")
        if graphs_desc:
            graphs_desc = "\n\n".join([f"{i}. {desc}" for i, desc in enumerate(graphs_desc, 1)])
            tools_desc_parts.append(f"""
## 可用的Subagent Toolkit列表如下：
{graphs_desc}
""")

        kwargs = request.kwargs.copy()
        kwargs["tools_desc"] = "\n\n".join(tools_desc_parts)
        kwargs["tools_name"] = ",".join(tools_name)

        if "cwd_dir" not in kwargs:
            kwargs["cwd_dir"] = ""

        mcp_tools = kwargs.get("mcp_tools", "")
        if self.mcp_client:
            try:
                if not self.mcp_client.is_available():
                    await self.mcp_client.initialize()
            except Exception as e:
                logger.error("initialize mcp server error", e)

            if self.mcp_client.is_available():
                selected_mcp_servers = None
                if state and state.ext:
                    selected_mcp_servers = state.ext.get(SELECTED_MCP_SERVERS)

                # 根据SELECTED_MCP_SERVERS过滤MCP工具
                if selected_mcp_servers is not None:
                    # 如果SELECTED_MCP_SERVERS存在但为空，表示没有MCP工具
                    if not selected_mcp_servers:
                        mcp_tools = ""
                    else:
                        filtered_mcp_tools = await self.mcp_client.get_filtered_tools_str(selected_mcp_servers)
                        mcp_tools = f"{mcp_tools}\n\n{filtered_mcp_tools}" if filtered_mcp_tools else mcp_tools
                else:
                    mcp_tools = f"{mcp_tools}\n\n{await self.mcp_client.get_all_tools_str()}"

        kwargs["mcp_tools_prompt"] = f"""
# 可用的MCP工具列表如下：
## 注意：使用MCP工具时，必须通过<use_mcp_tool>标签执行
{mcp_tools}
""" if mcp_tools else ""

        template_name = self._get_template_name()
        self.wrap_template_kwargs(kwargs, request, state)
        template = get_prompt_template(template_name, language=self.agent_template_language, type=template_type,
                                       **kwargs)
        return template

    def wrap_template_kwargs(self, kwargs: dict, request: GraphRequest, state=None) -> dict:
        return kwargs

    def _get_template_name(self) -> str:
        """
        Return the template name to be used for generating the system template.
        Must be implemented by subclasses.
        """
        return self.agent_template

    def get_nodes(self) -> dict:
        nodes = {}
        for node in self.nodes:
            if isinstance(node, BaseNodes):
                for node_info in node.get_nodes():
                    nodes[node_info["name"]] = node_info["func"]
            elif isinstance(node, BaseGraph):
                nodes[node.node_start] = node
            elif callable(node):
                nodes[node.__name__] = node
        return nodes

    def get_nodes_schema(self) -> dict:
        """
        获取节点的信息
        """
        info = {}
        for node in self.nodes:
            if isinstance(node, BaseNodes):
                for node_info in node.get_nodes():
                    name = node_info["name"]
                    if name not in info:
                        info[name] = {}
                    info[name]["schema"] = node_info["args_schema"]
                    info[name]["stream"] = node_info["stream"]
                    info[name]["execution_mode"] = node_info.get("execution_mode", "client")
            elif isinstance(node, BaseGraph):
                if node.node_start not in info:
                    info[node.node_start] = {}
                info[node.node_start]["schema"] = None
                info[node.node_start]["stream"] = True
                info[node.node_start]["execution_mode"] = "client"
            elif callable(node):
                name = node.__name__
                if name not in info:
                    info[name] = {}
                info[name]["schema"] = getattr(node, "args_schema", None)
                info[name]["stream"] = getattr(node, "stream", True)
                info[name]["execution_mode"] = getattr(node, "execution_mode", "client")
        return info

    @lru_cache(maxsize=128)
    def get_nodes_args_parser(self, node_name: str = None) -> dict:
        nodes = {}
        if not node_name:
            return nodes
        for node in self.nodes:
            if isinstance(node, BaseNodes):
                for node_info in node.get_nodes():
                    if node_info["name"] == node_name:
                        return node_info["args_parser"]
            elif callable(node) and node.__name__ == node_name:
                return node.args_parser if hasattr(node, "args_parser") else {}
        return nodes

    def _group_parallel_nodes(self, nodes_list: List[Callable]) -> List[Union[Callable, List[str]]]:
        if not nodes_list:
            return []

        result = []
        current_parallel_group = []

        for node in nodes_list:
            is_parallel = False

            if hasattr(node, '__self__'):
                is_parallel = hasattr(node.__self__, 'parallel') and node.__self__.parallel

            if is_parallel:
                current_parallel_group.append(node)
            else:
                if current_parallel_group:
                    if len(current_parallel_group) > 1:
                        interrupt_nodes = []
                        for parallel_node in current_parallel_group:
                            if hasattr(parallel_node, 'interrupt') and parallel_node.interrupt:
                                interrupt_nodes.append(SystemNodes().interrupt)

                        result.append(current_parallel_group)
                        result.extend(interrupt_nodes)
                    else:
                        result.append(current_parallel_group[0])

                    current_parallel_group = []

                result.append(node)

        if current_parallel_group:
            if len(current_parallel_group) > 1:
                interrupt_nodes = []
                for parallel_node in current_parallel_group:
                    if hasattr(parallel_node, 'interrupt') and parallel_node.interrupt:
                        interrupt_nodes.append(SystemNodes().interrupt)

                result.append(current_parallel_group)
                result.extend(interrupt_nodes)
            else:
                result.append(current_parallel_group[0])

        return result

    def add_nodes(self):
        """Register this graph's nodes on the workflow: the start node, all
        tool/sub-graph nodes, and the prefix/suffix node groups (with
        interrupt companions and parallel-group dispatchers where flagged)."""
        self.workflow.add_node(self.node_start, self.start)

        def add_addition_node(node_name: str, node_func: Callable):
            # Nodes flagged `interrupt` get a companion "<name>_interrupt" node.
            if hasattr(node_func, 'interrupt') and node_func.interrupt:
                self.workflow.add_node(f"{self.graph_name}{node_name}_interrupt", SystemNodes().interrupt)

        for node_name, node_func in self.get_nodes().items():
            if isinstance(node_func, BaseGraph):
                # Sub-graphs share this workflow and register their own nodes.
                node_func.workflow = self.workflow
                node_func.add_nodes()
            else:
                self.workflow.add_node(f"{self.graph_name}{node_name}", node_func)

        def add_node_group(nodes, prefix: str):
            # Register a prefix/suffix node (or list of nodes) under
            # "{graph_name}{prefix}_{index}" names.
            if not nodes:
                return

            if isinstance(nodes, list):
                grouped_nodes = self._group_parallel_nodes(nodes)
                node_index = 0
                parallel_group_index = 0

                for group in grouped_nodes:
                    if isinstance(group, list):
                        # Add a dispatcher node for the parallel group.
                        parallel_dispatcher_name = f"{self.graph_name}{prefix}_parallel_{parallel_group_index}"
                        self.workflow.add_node(parallel_dispatcher_name, SystemNodes().blank)

                        # Add each node of the parallel group.
                        for node in group:
                            node_name = f"{self.graph_name}{prefix}_{node_index}"
                            self.workflow.add_node(node_name, node)
                            node_index += 1

                        parallel_group_index += 1
                    else:
                        node_name = f"{self.graph_name}{prefix}_{node_index}"
                        self.workflow.add_node(node_name, group)
                        add_addition_node(f"{prefix}_{node_index}", group)
                        node_index += 1
            else:
                self.workflow.add_node(f"{self.graph_name}{prefix}_0", nodes)
                add_addition_node(f"{prefix}_0", nodes)

        add_node_group(self.prefix_node, "prefix")
        add_node_group(self.suffix_node, "suffix")

    def node_route(self, state: GraphState):
        """Return the node named in ``state.goto.type``; fall back to the
        start node when no target is set."""
        return state.goto.type or self.node_start

    def add_edges(self, skip_start_edge: bool = False):
        """
        Wire all edges for this graph: the start edge, each node's return edge
        back to the start node, sub-graph edges, the prefix/suffix chains
        (including parallel groups and interrupt companions), and finally the
        conditional router out of the start node.

        Args:
            skip_start_edge: when True, the graph_start -> node_start edge is
                omitted (used for sub-graphs whose parent already routes in).
        """
        if not skip_start_edge:
            self.workflow.add_edge(self.graph_start, self.node_start)
        route_nodes = []
        for node_name, node_func in self.get_nodes().items():
            if isinstance(node_func, BaseGraph):
                # Sub-graphs start from this graph's start node and, when the
                # sub-graph loops, also end back at it.
                node_func.graph_start = self.node_start
                if self.sub_graph_is_loop:
                    node_func.graph_end = self.node_start

                node_func.add_edges(skip_start_edge=True)
                route_nodes.append(node_func.node_start)
            else:
                # Every plain node returns to the start node after it runs.
                self.workflow.add_edge(f"{self.graph_name}{node_name}", self.node_start)
                route_nodes.append(f"{self.graph_name}{node_name}")

        route_nodes.append(self.node_start)
        route_nodes.append(self.graph_end)

        def add_addition_edge(edge_start_node_name: str, edge_end_node_name: str, node_func: Callable):
            # Interrupt-flagged nodes route through their paired
            # "<name>_interrupt" node registered in add_nodes().
            if hasattr(node_func, 'interrupt') and node_func.interrupt:
                self.workflow.add_edge(edge_start_node_name, f"{edge_start_node_name}_interrupt")
                self.workflow.add_edge(f"{edge_start_node_name}_interrupt", edge_end_node_name)
            else:
                self.workflow.add_edge(edge_start_node_name, edge_end_node_name)

        def add_node_edges(nodes, prefix: str, end_node: str):
            if not nodes:
                return

            if isinstance(nodes, list):
                grouped_nodes = self._group_parallel_nodes(nodes)
                node_index = 0
                parallel_group_index = 0
                first_node_name = None

                for group_idx, group in enumerate(grouped_nodes):
                    if isinstance(group, list):
                        # Parallel group: fan out from the dispatcher node.
                        parallel_dispatcher_name = f"{self.graph_name}{prefix}_parallel_{parallel_group_index}"

                        if not first_node_name:
                            first_node_name = parallel_dispatcher_name

                        # Collect the names of the parallel members.
                        parallel_node_names = []
                        for i, node in enumerate(group):
                            parallel_node_names.append(f"{self.graph_name}{prefix}_{node_index + i}")

                        self.workflow.add_conditional_edges(
                            parallel_dispatcher_name,
                            SystemNodes(parallel_nodes=parallel_node_names).parallel_execute,
                            parallel_node_names
                        )

                        # Determine where the parallel members converge next.
                        next_target = None
                        if group_idx + 1 < len(grouped_nodes):
                            next_group = grouped_nodes[group_idx + 1]
                            if isinstance(next_group, list):
                                # Next is another parallel group: go to its dispatcher.
                                next_target = f"{self.graph_name}{prefix}_parallel_{parallel_group_index + 1}"
                            else:
                                # Next is a single node.
                                next_target = f"{self.graph_name}{prefix}_{node_index + len(group)}"
                        else:
                            # No further group: converge on end_node.
                            next_target = end_node

                        # Every parallel member connects to the same target.
                        for i, node in enumerate(group):
                            current_node_name = f"{self.graph_name}{prefix}_{node_index + i}"
                            self.workflow.add_edge(current_node_name, next_target)

                        node_index += len(group)
                        parallel_group_index += 1
                    else:
                        # Single (non-parallel) node.
                        current_node_name = f"{self.graph_name}{prefix}_{node_index}"

                        if not first_node_name:
                            first_node_name = current_node_name

                        # Determine the single node's next target.
                        next_target = None
                        if group_idx + 1 < len(grouped_nodes):
                            next_group = grouped_nodes[group_idx + 1]
                            if isinstance(next_group, list):
                                # Next is a parallel group: go to its dispatcher.
                                next_target = f"{self.graph_name}{prefix}_parallel_{parallel_group_index}"
                            else:
                                # Next is a single node.
                                next_target = f"{self.graph_name}{prefix}_{node_index + 1}"
                        else:
                            # No further group: connect to end_node.
                            next_target = end_node

                        add_addition_edge(current_node_name, next_target, group)

                        node_index += 1

                # Record the entry point of the prefix/suffix chain.
                if first_node_name:
                    if prefix == "prefix":
                        self.prefix_node_name = first_node_name
                    elif prefix == "suffix":
                        self.suffix_node_name = first_node_name
                    route_nodes.append(first_node_name)
            else:
                # BUGFIX: this branch previously referenced `node_name`, a
                # stale variable leaked from the get_nodes() loop above — a
                # NameError when that dict is empty and the wrong node
                # otherwise. add_nodes() registers a single non-list
                # prefix/suffix node as "<graph_name><prefix>_0", so wire its
                # edges under that name and record it like the list branch.
                single_node_name = f"{self.graph_name}{prefix}_0"
                if prefix == "prefix":
                    self.prefix_node_name = single_node_name
                elif prefix == "suffix":
                    self.suffix_node_name = single_node_name
                route_nodes.append(single_node_name)
                add_addition_edge(single_node_name, end_node, nodes)

        add_node_edges(self.prefix_node, "prefix", self.node_start)
        add_node_edges(self.suffix_node, "suffix", self.graph_end)

        self.workflow.add_conditional_edges(self.node_start, self.node_route, route_nodes)

    def _wrap_user_input(self, content: str, **kwargs):
        max_length = self.llm_chat.max_model_len // 2
        if len(content) > max_length:
            front = max_length // 2
            back = max_length // 2
            content = content[:front] + "\n\n......(文件内容已完全生成，截取显示部分内容)......\n\n" + content[-back:]

        user_input = f"""<user_input>\n{content}\n</user_input>"""
        return user_input
        # return self._wrap_human_content(content=user_input, **kwargs)

    def _wrap_human_content(self, content: str, type: str = "text", **kwargs):
        """Serialize human message content (plus any non-None extras) to a JSON string."""
        payload = {"text": content, "type": type}
        payload.update((k, v) for k, v in kwargs.items() if v is not None)
        return dict_to_json_str(payload)

    def _wrap_environment_details(self, **kwargs):
        """
        Format environment details for inclusion in the prompt.
        """
        details = []
        cwd_dir = kwargs.get("cwd_dir", "")
        if "inscode_api_key" in kwargs:
            details.append(f"""INSCODE_SDK_API_KEY: {kwargs['inscode_api_key']}\n""")
        if "os_name" in kwargs:
            details.append(f"""Operating System: {kwargs['os_name']}""")
        if "os_shell" in kwargs:
            details.append(f"""Default Shell: {kwargs['os_shell']}""")
        if "home_dir" in kwargs:
            details.append(f"""Home Directory: {kwargs['home_dir']}""")
        if cwd_dir:
            details.append(f"""Current Working Directory: {cwd_dir}""")
        details.append(f"""# Current Time: {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}""")
        if "visible_files" in kwargs:
            details.append(f"""#VSCode Visible Files: {kwargs['visible_files']}""")
        if "open_files" in kwargs:
            details.append(f"""#VSCode Open Files: {kwargs['open_files']}""")
        if "cwd_files" in kwargs:
            details.append(f"""
#Current Working Directory ({cwd_dir}) Files:
{kwargs['cwd_files']}
(File list truncated. Use list_files on specific subdirectories if you need to explore further.)""")
        if not details:
            return None
        details = "\n".join(details)
        return f"""
<environment_details>
{details}
</environment_details>
"""

    async def _exist_thread_id(self, thread_id: str) -> bool:
        """
        Return True when ``thread_id`` was seen before (resume), False for a
        brand-new question. The in-memory saver keeps the marker in its
        ``storage`` dict; any other saver is asked via its async ``exists``.
        """
        key = f"thread_id${thread_id}"
        if not isinstance(self.memory, InMemorySaver):
            return await self.memory.exists(key)
        return key in self.memory.storage

    async def _save_thread_id(self, thread_id: str):
        """Record ``thread_id`` with a timestamp so later calls can detect a resume."""
        key = f"thread_id${thread_id}"
        stamp = f"{datetime.now()}"
        if isinstance(self.memory, InMemorySaver):
            self.memory.storage[key] = stamp
        else:
            # External savers (e.g. Redis) expire the marker after six hours.
            await self.memory.set(key, stamp, expire_seconds=6 * 60 * 60)

    def draw_graph_image(self, output_path: str = "graph_image.png"):
        """Render the compiled graph as a Mermaid PNG at ``output_path``.

        Compiles the graph first if needed; any failure is logged rather
        than raised.
        """
        try:
            if not self.graph:
                self.build()
            png_bytes = self.graph.get_graph().draw_mermaid_png()
            with open(output_path, "wb") as image_file:
                image_file.write(png_bytes)
        except Exception as e:
            logger.error(f"Failed to generate graph image: {e}")

    async def destroy(self):
        """
        Clean up all resources to prevent memory leaks.
        This method should be called when the graph is no longer needed.

        Order: the Redis connection is closed first, then the memory saver,
        then the chat LLM client, then each node / sub-graph, and finally the
        compiled graph and workflow references are dropped. Individual
        failures are logged as warnings and do not abort the remaining steps.
        """
        try:
            if self.redis:
                try:
                    await self.redis.aclose()
                except Exception as e:
                    logger.warning(f"Error closing Redis connection: {e}")
                finally:
                    # Drop the reference even if closing failed.
                    self.redis = None

            # Not every saver has an async close() (InMemorySaver does not),
            # hence the hasattr/callable guard before awaiting it.
            if hasattr(self.memory, 'close') and callable(getattr(self.memory, 'close')):
                try:
                    await self.memory.close()
                except Exception as e:
                    logger.warning(f"Error cleaning up memory saver: {e}")
            self.memory = None

            if self.llm_chat:
                await self.llm_chat.close()
                self.llm_chat = None

            if self.nodes:
                for node in self.nodes:
                    try:
                        # Both node objects and nested sub-graphs release
                        # their own resources via destroy().
                        if isinstance(node, BaseNodes) and hasattr(node, 'destroy'):
                            await node.destroy()
                        elif isinstance(node, BaseGraph) and hasattr(node, 'destroy'):
                            await node.destroy()
                    except Exception as e:
                        logger.warning(f"Error cleaning up node {getattr(node, '__name__', type(node).__name__)}: {e}")
                self.nodes = []

            # Release the compiled graph and workflow so they can be GC'd.
            self.graph = None
            self.workflow = None
            logger.info(f"Graph cleanup completed successfully: {self.graph_name}")
        except Exception as e:
            logger.error(f"Error during graph cleanup: {e}")
