'''
LangGraph demo: a chatbot graph with tool calling (web search / local RAG /
human-in-the-loop interrupts), checkpointed with an in-memory saver so a
conversation thread can be paused and resumed.
'''

import keys
import traceback
from typing import Annotated
# from langchain_anthropic import ChatAnthropic
from typing_extensions import TypedDict
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
class State(TypedDict): # state schema: a plain dict at runtime
    """Graph state shared by all nodes."""
    # Running conversation; the add_messages reducer appends instead of overwriting.
    messages: Annotated[list, add_messages]
    # name: str
    # birthday: str
    # NOTE(review): declared but never successfully written — the
    # Command(update=...) calls in human_assistance_tool3 are discarded, so
    # this flag stays unset; confirm whether it is still needed.
    is_interrupt: bool
graph_builder = StateGraph(State)

# LangSmith tracing setup.
# # https://smith.langchain.com/o/89358f56-ecf9-4b82-87a8-d098d7d16813/settings
import os
os.environ["LANGSMITH_TRACING"] = "true"
# os.environ["LANGSMITH_API_KEY"] = smith_key

# # llm client
# import os
# os.environ["OPENAI_API_KEY"] = keyx
# from langchain.chat_models import init_chat_model
# llm = init_chat_model("gpt-4o-mini", model_provider="openai")

# Active LLM client: Qwen via Tongyi/DashScope. Credentials are presumably
# supplied by the `keys` import at the top of the file — TODO confirm.
from langchain_community.chat_models.tongyi import ChatTongyi
llm = ChatTongyi(streaming=False,) # qwen-turbo

# # llm client Custom_qwen
# from lc_infer import Custom_Langchain_ChatLLM
# model_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2.5-14B-Instruct'
# llm = Custom_Langchain_ChatLLM(mode_name_or_path=model_path) # qwen-14b

# tool1: Tavily web search.
from langchain_community.tools.tavily_search import TavilySearchResults
# https://app.tavily.com/home?code=1rEW_C0Pcz5GtmdGISBHi_htKOGYKyqNsVRu5Btuu4Md8&state=eyJyZXR1cm5UbyI6Ii9ob21lIn0
# os.environ["TAVILY_API_KEY"] = tavily_key #
# NOTE(review): `search` is defined here but not included in the `tools`
# list below, so it is currently inactive.
search = TavilySearchResults(max_results=2) # pip install langchain-community== 0.3.17
# tool2
from langgraph.types import Command, interrupt
from langchain_core.tools import tool
@tool
def human_assistance(query: str) -> str:
    """Request assistance from a human."""
    # NOTE: the docstring above is the tool description the LLM sees; keep it stable.
    print('执行human_assistance前 ')
    # interrupt() pauses the graph here (the stream exits); execution resumes
    # when the caller invokes the graph again with Command(resume=...).
    human_response = interrupt({"query": query})
    print('执行human_assistance后 ')
    print(human_response)
    # Expects the resume payload to be {"data": ...} — see stream_graph_updates.
    return human_response["data"]

from langchain_core.messages import ToolMessage
from langchain_core.tools import InjectedToolCallId, tool

from langgraph.types import Command, interrupt

# tool3: human-in-the-loop review of (name, birthday) via interrupt().
@tool
def human_assistance_tool3(
    name: str, birthday: str,
) -> str: #  tool_call_id: Annotated[str, InjectedToolCallId]
    """Request assistance from a human."""
    # NOTE: the docstring above is the tool description exposed to the LLM.
    print('human_assistance_tool3前')
    # FIX: the original built `Command(update={'is_interrupt': True})` here
    # (and `... False` after the interrupt) and discarded the result.
    # Constructing a Command without returning it from the tool never touches
    # graph state, so State.is_interrupt was never set; the dead calls are
    # removed. To actually update state from a tool, return
    # Command(update={... , "messages": [ToolMessage(..., tool_call_id=...)]})
    # using an InjectedToolCallId parameter, per the LangGraph
    # human-in-the-loop pattern.
    human_response = interrupt(  # pause the graph until resumed with Command(resume=...)
        {
            "question": "Is this correct?",
            "name": name,
            "birthday": birthday,
        },
    )
    print('human_assistance_tool3后')
    # Expects the resume payload to be {"data": ...}; raises KeyError otherwise.
    return human_response['data']








# from lc_tools import open_pycharm
# tools = [search, human_assistance_tool3,open_pycharm]

# Active tool registry: only the local RAG tool is wired in right now; the
# `search` and human-assistance tools defined above are currently unused.
from lc_tools import get_lc_rag_tool
lc_rag_tool = get_lc_rag_tool()
tools = [lc_rag_tool]


# LLM node: one graph node, invoked once per turn.
llm_with_tools = llm.bind_tools(tools)  # tool schemas are handed to the LLM as call parameters
def chatbot(state: State):
    """Run the tool-bound model over the accumulated message list."""
    ai_reply = llm_with_tools.invoke(state["messages"])
    # At most one tool call per turn is supported downstream.
    assert len(ai_reply.tool_calls) <= 1
    return {"messages": [ai_reply]}


# A 节点
import json
from langchain_core.messages import ToolMessage
class BasicToolNode:
    """Graph node that executes every tool call on the most recent message.

    Each requested tool is looked up by name, invoked with the LLM-provided
    arguments, and its result is wrapped in a ToolMessage appended to state
    under "messages".
    """

    def __init__(self, tools: list) -> None:
        # Index tools by their registered name for direct dispatch.
        self.tools_by_name = {t.name: t for t in tools}

    def __call__(self, inputs: dict):
        messages = inputs.get("messages", [])
        if not messages:
            raise ValueError("No message found in input")
        last_message = messages[-1]
        results = []
        for call in last_message.tool_calls:
            tool_output = self.tools_by_name[call["name"]].invoke(call["args"])
            results.append(
                ToolMessage(
                    tool_output,
                    name=call["name"],
                    tool_call_id=call["id"],
                )
            )
        return {"messages": results}
tool_node = BasicToolNode(tools=tools) # tools包装成node


# 有条件边: T之后,根据结果确定执行tool还是end
def route_tools(
    state: State,
):
    """Conditional-edge router for the chatbot node.

    Returns "tools" when the last message carries tool calls, otherwise END.
    Raises ValueError when no messages are present.
    """
    if isinstance(state, list):
        last_message = state[-1]
    else:
        messages = state.get("messages", [])
        if not messages:
            raise ValueError(f"No messages found in input state to tool_edge: {state}")
        last_message = messages[-1]
    if len(getattr(last_message, "tool_calls", ())) > 0:
        return "tools"
    return END



# Assemble the graph.
# The first argument is the unique node name
# The second argument is the function or object that will be called whenever
# the node is used.
from langgraph.graph import START,END
graph_builder.add_node("chatbot", chatbot) # LLM node
graph_builder.set_entry_point("chatbot")
# NOTE(review): set_finish_point adds an unconditional chatbot -> END edge on
# top of the conditional END route below — confirm the double edge is intended.
graph_builder.set_finish_point("chatbot")

graph_builder.add_node("tools", tool_node) # tool-execution node
graph_builder.add_conditional_edges(
    "chatbot", # source node
    route_tools,
    {"tools": "tools", END: END}, # destination nodes
) # conditional edge
# graph_builder.add_conditional_edges(
#     "chatbot",
#     tools_condition,
# )

# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
# NOTE(review): set_entry_point("chatbot") above already wires START -> chatbot;
# this explicit edge looks redundant — confirm.
graph_builder.add_edge(START, "chatbot")

from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver() # could also be SqliteSaver; checkpoints are keyed by the thread_id in exe_id
graph = graph_builder.compile(checkpointer=memory)
resx = {'messages':[]}  # scratch holder for the last result; only referenced from commented-out code below


# Interactive driver: one user turn per call.
def stream_graph_updates(user_input: str):
    """Run one user turn through the graph, or resume a pending interrupt.

    Inputs starting with 'expert' are treated as the human's reply to a
    pending interrupt() and resume the paused graph; anything else is
    streamed as a fresh user message, printing LLM token chunks as they
    arrive.
    """
    exe_id = {"configurable": {"thread_id": "1"}} # thread id for the checkpointer: state persists across calls; node outputs are appended onto the next invocation's input

    # NOTE(review): snapshot/state_dict only feed the commented-out check
    # below — dead code at the moment.
    snapshot = graph.get_state(exe_id)
    state_dict = snapshot.values
    # if state_dict.get('is_interrupt') and state_dict['is_interrupt']: # check whether the graph is paused at an interrupt; how to access res
    if user_input[:6]=='expert':# crude marker: treat this input as the human reply to a pending interrupt
    # if resx.get('messages') and resx['messages'][-1].type=='ai' and resx['messages'][-1].tool_calls[-1]['name']=='human_assistance':
        # human_response = (
        #     "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
        #     " It's much more reliable and extensible than simple autonomous agents."
        # )
        human_response = user_input
        # Command(resume=...) is delivered to the pending interrupt() call as its return value.
        human_command = Command(resume={"data": human_response})

        # events = graph.stream(human_command, exe_id, stream_mode="values")
        res = graph.invoke(human_command, exe_id, stream_mode="values")
        # for event in events:
        #     if "messages" in event:
        #         event["messages"][-1].pretty_print()
    else:
        events = graph.stream({"messages": [{"role": "user", "content": user_input}]},exe_id,stream_mode='messages') # run the graph; returns a generator yielding each node's output in turn
        for message_chunk, metadata in events:  # stream_mode='messages'; AIMessageChunk/ToolMessage, {}
            if isinstance(message_chunk,ToolMessage):# skip raw tool output chunks (.content is a list of dicts)
                continue
            if message_chunk.content: # str
                print(message_chunk.content + '|||')


        # res = graph.invoke({"messages": [{"role": "user", "content": user_input}]}, exe_id)
    # for eind, event in enumerate(events):
    #     print(f'event {eind}')
    #     for vind, value in enumerate(event.values()): # AddableUpdatesDict: a single node's output
    #         print(f'value{vind} {value}')
    #         # print("Assistant:", value["messages"][-1].pretty_print())
    # print(res)

    # for k,v in res.items():
    #     print(k) # name
    #     # if isinstance(v,list):
    #     if k == 'messages':
    #         for t in v:
    #             print(f'\ttype={t.type} ',end='')
    #             print(t)
    #     else:
    #         print('\t', end='')
    #         print(v)
    # resx['messages'] = res['messages']



# Main REPL loop: read a user line, run it through the graph, repeat until
# the user quits or an error occurs.
while True:
    try:
        user_input = input("User: ")
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break

        stream_graph_updates(user_input)
    except KeyboardInterrupt:
        # Ctrl+C: exit quietly instead of dumping a traceback.
        break
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt indiscriminately. Log the failure and stop.
        print(traceback.format_exc())
        break

# user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?"
# config = {"configurable": {"thread_id": "1"}}
#
# events = graph.stream(
#     {"messages": [{"role": "user", "content": user_input}]},
#     config,
#     stream_mode="values",
# )
# for event in events:
#     if "messages" in event:
#         event["messages"][-1].pretty_print()

# Sample prompts to try (Chinese originals kept):
# 可以使用哪些工具 — "What tools can you use?"
# 上海今天天气 — "Shanghai weather today"
# 请求人类提供帮助 如何搭建agent — "Ask a human for help: how to build an agent"
# # 请求人类提供帮助 LangGraph何时发布 — "Ask a human for help: when was LangGraph released"
# 打开pycharm编程软件 — "Open the PyCharm IDE"

# "Can you look up when LangGraph was released?  When you have the answer, use the human_assistance tool for review."