# taavily_key="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"
import asyncio
import os

from langchain_community.tools import TavilySearchResults
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# export TAVILY_API_KEY="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"
# NOTE(security): hard-coded API key committed to source — move it to the
# environment / a secrets manager and rotate the leaked key.
# setdefault() respects a TAVILY_API_KEY that is already configured instead
# of unconditionally clobbering it.
os.environ.setdefault("TAVILY_API_KEY", "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")
# TavilySearchResults picks up TAVILY_API_KEY from the environment; the
# original `taavily_key=...` kwarg is not a recognized parameter and was dropped.
search = TavilySearchResults(max_results=10)
# search_results=search.invoke("what is the weather in SF")
#
# print(search_results)

# Tool list handed to the agent below.
tools = [search]

# NOTE(security): hard-coded API key committed to source — prefer setting
# OPENAI_API_KEY in the environment and rotate the leaked key. The literal
# is kept only as a fallback for backward compatibility.
model = ChatOpenAI(
    api_key=os.environ.get(
        "OPENAI_API_KEY", "sk-CftUbVSsA61lwwgMz9xvt6znTunQZfgBP8ZCVLbQsKfXUR6k"
    ),
    model='deepseek-ai/DeepSeek-V3',
    base_url="https://www.henapi.top/v1",
)
# Tool-aware model used only by the commented-out direct-invocation
# experiment below; create_react_agent binds the tools itself.
model_with_tools = model.bind_tools(tools)

# response=model_with_tools.invoke([HumanMessage(content="What's the weather in SF?")])
# print(f"ContentString: {response.content}")
# print(f"ToolCalls: {response.tool_calls}")

# Memory-less agent used by the commented-out experiments below; it is
# superseded (rebound) by the checkpointed agent created further down.
agent_executor = create_react_agent(model, tools)
# response=agent_executor.invoke({"messages":[HumanMessage(content="What's the weather in SF?")]})

# print(response["messages"])


# for chunk in agent_executor.stream(
#         {"messages": [HumanMessage(content="What's the weather in SF? answer in chinese")]}
# ):
#     print(chunk)
#     print("----")

# async def process_events():
#     async for event in agent_executor.astream_events(
#         {"messages": [HumanMessage(content="whats the weather in sf? answer in chinese")]}, version="v1"
#     ):
#         kind = event["event"]
#         if kind == "on_chain_start":
#             if (
#                 event["name"] == "Agent"
#             ):  # Was assigned when creating the agent with `.with_config({"run_name": "Agent"})`
#                 print(
#                     f"Starting agent: {event['name']} with input: {event['data'].get('input')}"
#                 )
#         elif kind == "on_chain_end":
#             if (
#                 event["name"] == "Agent"
#             ):  # Was assigned when creating the agent with `.with_config({"run_name": "Agent"})`
#                 print()
#                 print("--")
#                 print(
#                     f"Done agent: {event['name']} with output: {event['data'].get('output')['output']}"
#                 )
#         if kind == "on_chat_model_stream":
#             content = event["data"]["chunk"].content
#             if content:
#                 # Empty content in the context of OpenAI means
#                 # that the model is asking for a tool to be invoked.
#                 # So we only print non-empty content
#                 print(content, end="|")
#         elif kind == "on_tool_start":
#             print("--")
#             print(
#                 f"Starting tool: {event['name']} with inputs: {event['data'].get('input')}"
#             )
#         elif kind == "on_tool_end":
#             print(f"Done tool: {event['name']}")
#             print(f"Tool output was: {event['data'].get('output')}")
#             print("--")
#
# # 运行异步函数
# asyncio.run(process_events())


# Checkpointed agent: MemorySaver persists conversation state per thread_id,
# which is what lets the second question refer back to the first turn.
memory = MemorySaver()
agent_executor = create_react_agent(model, tools, checkpointer=memory)
config = {"configurable": {"thread_id": "abc123"}}


def _stream_and_print(text):
    """Stream the agent's response to *text* on the shared thread, printing
    each chunk followed by a separator (deduplicates the two identical loops)."""
    for chunk in agent_executor.stream(
        {"messages": [HumanMessage(content=text)]}, config
    ):
        print(chunk)
        print("----")


_stream_and_print("hi im bob")
_stream_and_print("whats my name?")