from langchain_community.tools import TavilySearchResults
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
import os

from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Define the chat model.
# NOTE: `model` is the current parameter name; `model_name` is a deprecated alias.
# ChatOpenAI would also pick up OPENAI_BASE_URL from the environment on its own,
# but passing it explicitly keeps the configuration visible.
llm = ChatOpenAI(model="gpt-4", base_url=os.environ["OPENAI_BASE_URL"])
# In-memory checkpointer so the agent can resume a conversation by thread_id.
memory = MemorySaver()

# Web-search tool (at most 2 results per query).
search = TavilySearchResults(max_results=2)
# search_results = search.invoke("查询当前广州的天气")
# print("搜索的结果:", search_results)

tools = [search]

# Build the prebuilt ReAct-style agent with tool use and checkpointing.
agent_executor = create_react_agent(llm, tools, checkpointer=memory)

# Conversation thread id used by the checkpointer to persist/restore state.
config = {"configurable": {"thread_id": "abc123"}}
# response = agent_executor.invoke({"messages":[HumanMessage(content="查询当前广州的天气")]}, config=config)
# print("智能体的响应:", response)

# Stream full state values (one snapshot per agent step).
# for chunk in agent_executor.stream({"messages":[HumanMessage(content="查询当前广州的天气")]}, config=config,stream_mode="values"):
#     chunk["messages"][-1].pretty_print()

# Stream token-by-token output.
# BUG FIX: the original used stream_mode="values", which yields whole state
# snapshots (identical to the section above), not tokens. Token-level streaming
# requires stream_mode="messages", which yields (message_chunk, metadata) tuples.
for message_chunk, metadata in agent_executor.stream(
    {"messages": [HumanMessage(content="查询当前广州的天气")]},
    config=config,
    stream_mode="messages",
):
    if message_chunk.content:
        print(message_chunk.content, end="", flush=True)