'''
LangGraph agent demo
    Builds a ReAct agent using TavilySearchResults and create_react_agent.

Environment note: developed against the local venv D:\code\other\LLMs\llm_py310
'''

## head
# LangSmith tracing setup. Use setdefault so an operator's pre-existing
# LANGSMITH_TRACING value (e.g. "false") is respected rather than clobbered.
import os
os.environ.setdefault("LANGSMITH_TRACING", "true")
# os.environ["LANGSMITH_API_KEY"] = smith_key  # required for trace uploads

# LLM client — expects OPENAI_API_KEY to already be set in the environment.
# os.environ["OPENAI_API_KEY"] = keyx
from langchain.chat_models import init_chat_model
model = init_chat_model("gpt-4o-mini", model_provider="openai")



# Import relevant functionality
# from langchain_anthropic import ChatAnthropic

from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Tools: Tavily web search. Requires TAVILY_API_KEY in the environment.
# (pip install langchain-community==0.3.17)
from langchain_community.tools.tavily_search import TavilySearchResults
# os.environ["TAVILY_API_KEY"] = tavily_key
# NOTE(review): removed a Tavily signup URL that embedded a one-time auth
# "code=" token — credential-bearing links must not be committed to source.
search = TavilySearchResults(max_results=2)  # cap results to keep prompts small
tools = [search]

# Create the agent: a prebuilt ReAct loop with an in-memory checkpointer so
# conversation state persists across calls that share the same thread_id.
memory = MemorySaver()
# model = ChatAnthropic(model_name="claude-3-sonnet-20240229")
agent_executor = create_react_agent(model, tools, checkpointer=memory)

# Use the agent. The thread_id keys the MemorySaver checkpoint; repeated
# calls with this same config continue one conversation.
config = {"configurable": {"thread_id": "abc123"}}

# For incremental output, agent_executor.stream({...}, config) yields chunks
# instead of blocking; invoke() returns the full final state in one shot.
res = agent_executor.invoke({"messages": [HumanMessage(content="上海今天有什么新闻?")]}, config)
print(res)