import os

from httpx import ConnectTimeout
# SECURITY: API keys are hard-coded and committed to source. These credentials
# should be rotated and loaded from the environment or a secrets manager
# (e.g. python-dotenv) instead of being embedded here.
os.environ["DEEPSEEK_API_KEY"] = "sk-1a1c5c7e74664f97b9ae47a8e93f2620"
os.environ["TAVILY_API_KEY"] = "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"

from PIL import Image as PILImage
import io

from typing import Annotated

from langchain_deepseek import ChatDeepSeek
from langchain_tavily import TavilySearch
from typing_extensions import TypedDict

from pydantic import BaseModel
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_core.messages import AIMessage, ToolMessage


class State(TypedDict):
    """Shared graph state passed between nodes."""

    # Conversation history; add_messages appends new messages (or replaces by id).
    messages: Annotated[list, add_messages]
    # Flag used to decide whether human intervention is needed.
    ask_human: bool


class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist
    directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide
    the right guidance.
    """

    # NOTE: the docstring above doubles as the tool description sent to the LLM
    # via bind_tools, so its wording influences when the model picks this tool.
    request: str

# Initialize the graph builder with the shared State schema
graph_builder = StateGraph(State)

# Initialize tools: Tavily web search plus the RequestAssistance escalation schema
# (the latter is a pydantic model, not an executable tool — it is handled by the
# human node rather than by ToolNode)
tool = TavilySearch(max_results=2)
tools = [tool, RequestAssistance]

# Initialize the LLM and bind the tools so the model knows which tools it may call
llm =  ChatDeepSeek(model="deepseek-chat", temperature=1.0)
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    """Primary LLM node: invoke the tool-bound model on the conversation so far.

    Returns the model's reply plus an ``ask_human`` flag that is True when the
    model requested the RequestAssistance tool (i.e. human escalation).
    """
    response = llm_with_tools.invoke(state["messages"])

    # Fix: inspect every tool call, not only tool_calls[0] — the model may emit
    # RequestAssistance alongside or after other tool calls, and the original
    # check silently missed it in that case.
    ask_human = any(
        tc["name"] == RequestAssistance.__name__
        for tc in (response.tool_calls or [])
    )
    return {"messages": [response], "ask_human": ask_human}


def human_node(state: State):
    """Human-in-the-loop node, entered after the graph resumes from its interrupt.

    A human operator normally injects a ToolMessage into the state while the
    graph is paused. If no such message is present, answer the pending tool
    call with a placeholder ToolMessage so the LLM can continue. In either
    case the ask_human flag is cleared.
    """
    print("------进入 human_node-------")

    last = state["messages"][-1]
    placeholder = []
    # No human-supplied ToolMessage found: satisfy the pending tool call with a
    # stub reply so the model is not left waiting for a tool result.
    if not isinstance(last, ToolMessage):
        placeholder = [
            ToolMessage(
                content="No response from human.",
                tool_call_id=last.tool_calls[0]["id"],
            )
        ]

    return {
        # Append the placeholder (or nothing, if the human already responded).
        "messages": placeholder,
        # Reset the escalation flag.
        "ask_human": False,
    }

# Register nodes
graph_builder.add_node("human", human_node)
graph_builder.add_node("chatbot", chatbot)
# Only the search tool is executable; RequestAssistance is handled by the human node
tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

# Register edges: both tool results and human replies feed back into the chatbot
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("human", "chatbot")
graph_builder.add_edge("tools", "chatbot")

# Conditional routing function for the chatbot's conditional edges
def select_next_node(state: State):
    """Route from the chatbot node: go to "human" when escalation was requested,
    otherwise defer to the prebuilt tools_condition ("tools" or END)."""
    if not state["ask_human"]:
        # No escalation — route exactly as the standard tool-calling condition does.
        return tools_condition(state)
    return "human"

graph_builder.add_conditional_edges(
    "chatbot",
    select_next_node,
    {"human": "human", "tools": "tools", END: END},
)

# Add a checkpointer so state persists across stream() calls (required for
# interrupts, state inspection, and replay below)
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    # Pause execution before entering the human node so a person can intervene
    interrupt_before=["human"],
)

# png_data = graph.get_graph().draw_mermaid_png()
# img = PILImage.open(io.BytesIO(png_data))
# img.show()


# user_input = "I'm learning LangGraph. Could you do some research on it for me?"
user_input = "I need some expert guidance for building this AI agent. Could you request assistance for me?"
# thread_id identifies this conversation in the checkpointer
config = {"configurable": {"thread_id": "1"}}

# First run: streams until the graph is interrupted before the "human" node
# (when the model requested escalation) or until it finishes
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()


# Read the current graph state (execution is paused at the interrupt point)
snapshot = graph.get_state(config)
# print(snapshot.next)  # ('tools', )
existing_messages = snapshot.values["messages"][-1]
# print(existing_messages.tool_calls)


# Flow: user input -> chatbot node -> check ask_human -> interrupt; a human adds a
#       ToolMessage -> stream None to resume -> human node
#       -> chatbot node -> check ask_human -> END
# NOTE(review): this assumes the last message carries at least one tool call —
# existing_messages.tool_calls[0] raises IndexError otherwise; confirm upstream.
if existing_messages.tool_calls[0]["name"] == RequestAssistance.__name__:
    print("需要人工干预")
    human_response = (
    "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
    " It's much more reliable and extensible than simple autonomous agents."
)
    # Answer the pending RequestAssistance call as if a human expert replied
    human_messages = [
        ToolMessage(content=human_response, tool_call_id=existing_messages.tool_calls[0]["id"])     
    ]
    graph.update_state(config, {"messages": human_messages})
    continue_tools = True
    answer_bool = False
else:
    print("不需要人工干预")
    continue_tools = False
    answer_bool = False


if continue_tools:
    print("继续工具调用")
    # Streaming `None` adds nothing to the current state; it resumes execution
    # as if the graph had never been interrupted.
    events = graph.stream(None, config, stream_mode="values")  
    # The first None resumes from the interrupt; a second None would continue past the human node
    for i, event in enumerate(events):
        if "messages" in event:
            event["messages"][-1].pretty_print()
else:
    print("不继续工具调用")
    if answer_bool:
        # Tell the agent the answer directly instead of performing the tool call
        answer = (
            "LangGraph is a library for building stateful, multi-actor applications with LLMs."
        )
        new_messages = [
            # The LLM API expects some ToolMessage to match its tool call. We'll satisfy that here.
            ToolMessage(content=answer, tool_call_id=existing_messages.tool_calls[0]["id"]),
            # And then directly "put words in the LLM's mouth" by populating its response.
            AIMessage(content=answer),
        ]

        new_messages[-1].pretty_print()
        graph.update_state(
            # Which state to update
            config,
            # The updated values to provide. The messages in our `State` are "append-only", meaning this will be appended
            # to the existing state. We will review how to update existing messages in the next section!
            {"messages": new_messages},
        )

        print("\n\nLast 2 messages;")
        print(graph.get_state(config).values["messages"][-2:])
    
    else:
        # Manually rewrite the pending tool call's arguments before resuming
        print("Original")
        print('content', existing_messages.content)
        print(existing_messages.tool_calls[0])
        print("Message ID", existing_messages.id)
        print(existing_messages.tool_calls[0])
        
        new_tool_call = existing_messages.tool_calls[0].copy()
        # Replace the query in the original tool call; the id must stay the same
        # NOTE(review): assumes the pending call is the Tavily search tool (its
        # args contain "query"); a RequestAssistance call has "request" instead.
        new_tool_call["args"]["query"] = "LangGraph human-in-the-loop workflow"
        
        new_message = AIMessage(
            content=existing_messages.content,
            tool_calls=[new_tool_call],
            # Important! The ID is how LangGraph knows to REPLACE this message in
            # the state rather than append it.
            id=existing_messages.id,
        )

        print("Updated")
        print(new_message.tool_calls[0])
        print("Message ID", new_message.id)
        graph.update_state(config, {"messages": [new_message]})

        print("\n\nTool calls")
        print(graph.get_state(config).values["messages"][-1].tool_calls)    

        print('恢复工具调用')
        events = graph.stream(None, config, stream_mode="values")
        for event in events:
            if "messages" in event:
                event["messages"][-1].pretty_print()

# Replay: walk the complete saved state history and resume from a chosen checkpoint
print('replay 整个完整的状态历史')
to_replay = None
for state in graph.get_state_history(config):
    print("Num Messages: ", len(state.values["messages"]), "Next: ", state.next)
    print("-" * 80)
    if len(state.values["messages"]) == 4:
        # We somewhat arbitrarily select a state by its message count.
        to_replay = state

# The `checkpoint_id` inside `to_replay.config` identifies the saved checkpoint
# to resume from.
# Fix: guard against no matching checkpoint — the original dereferenced
# to_replay.config unconditionally and crashed with AttributeError when no
# state in the history had exactly 4 messages.
if to_replay is not None:
    for event in graph.stream(None, to_replay.config, stream_mode="values"):
        if "messages" in event:
            event["messages"][-1].pretty_print()
else:
    print("No checkpoint with 4 messages found; skipping replay.")