import os

from langchain_community.chat_models import ChatTongyi
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from pydantic import SecretStr

# Tavily web-search tool, limited to the single best hit per query.
# NOTE(security): the API key must come from the environment — never commit
# credentials to source control (the previous hard-coded key is leaked and
# should be revoked). Set TAVILY_API_KEY before running.
search = TavilySearchResults(
    max_results=1,
    tavily_api_key=os.environ["TAVILY_API_KEY"],
)
# Every tool the agent is allowed to call; extend this list to add capabilities.
tools = [search]


# Tongyi (Qwen) chat model. Swap the model name as needed; model list:
# https://help.aliyun.com/zh/model-studio/getting-started/models
chatLLM = ChatTongyi(
    model="qwen-plus-2025-04-28",
    streaming=True,
    # NOTE(security): read the DashScope key from the environment instead of
    # hard-coding it (the previously committed key is leaked and should be
    # revoked). Set DASHSCOPE_API_KEY before running.
    api_key=SecretStr(os.environ["DASHSCOPE_API_KEY"]),
    # other params...
)
# Attach the tool schemas so the model can emit structured tool calls.
model_with_tools = chatLLM.bind_tools(tools)

# response = model_with_tools.invoke([HumanMessage(content="你好!")])
#
# print(f"ContentString: {response.content}")
# print(f"ToolCalls: {response.tool_calls}")
#
# print("-----------------------")
#
#
# response = model_with_tools.invoke([HumanMessage(content="北京天气怎么样?")])
# print(f"ContentString: {response.content}")
# print(f"ToolCalls: {response.tool_calls}")
#
# print("-----------使用agent调用------------")
# In-memory checkpointer: persists conversation state between invocations
# that share the same thread_id in their RunnableConfig.
memory = MemorySaver()
# ReAct-style agent: the model decides per turn whether to answer directly
# or to call one of the bound tools (here, Tavily search).
agent_executor = create_react_agent(model=chatLLM, tools=tools, checkpointer=memory)
# response = agent_executor.invoke({"messages": [HumanMessage(content="你好!")]})
# print(response)
# print("-----------------------")
# response = agent_executor.invoke(
#     {"messages": [HumanMessage(content="北京天气怎么样?")]}
# )
# print(response)


#
# for step,metadata in agent_executor.stream(
#     {"messages": [HumanMessage(content="北京天气怎么样?")]},
#     # stream_mode="values",
#     stream_mode="messages",
# ):
#     # step["messages"][-1].pretty_print()
#     if metadata["langgraph_node"] == "agent" and (text := step.text()):
#         print(text, end="|")


def _chat_stream(message: str, run_config: RunnableConfig) -> None:
    """Send one user message to the agent and print every streamed chunk."""
    events = agent_executor.stream(
        {"messages": [HumanMessage(content=message)]}, run_config
    )
    for event in events:
        print(event)
        print("----")


# A fixed thread_id keys the checkpointer, so the second turn can recall
# what was said in the first.
config: RunnableConfig = {"configurable": {"thread_id": "abc123"}}
_chat_stream("你好，我是冬阳", config)
print("==============")
_chat_stream("我叫什么名字？", config)