import os
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver

# Tavily needs an API key: export TAVILY_API_KEY in the environment before
# running (or uncomment the line below and fill in a real key).
# os.environ["TAVILY_API_KEY"] = "tvly-xxxx"

# In-memory checkpointer so the agent can persist conversation state per
# thread_id across invocations (only used by the commented example below).
memory = MemorySaver()
# Local Llama 3.1 model served through Ollama.
model = ChatOllama(model="llama3.1")
# Tavily web-search tool, capped at 2 results per query.
search = TavilySearchResults(
    max_results=2,
)
tools = [search]
# ReAct agent WITH memory. NOTE(review): this binding is shadowed by the
# second `create_react_agent(...)` call further down in the file, so it is
# only exercised if the streaming example below is uncommented.
agent_executor = create_react_agent(model=model, tools=tools, checkpointer=memory)

# use the agent
# Example: stream a conversation under a fixed thread_id so the checkpointer
# can associate turns with the same session. (Messages are in Chinese: the
# system prompt asks for recent info answered in Chinese; the human message
# is a greeting/self-introduction.)
#config = {"configurable": {"thread_id": "abc13"}}
#for chunk in agent_executor.stream(
#    {"messages": [
#        SystemMessage(content="请获取近一个月内的信息，并使用中文回答。"),
#        HumanMessage(content="嗨! 我是王灵，我生活在中国西安。")
#    ]},
#    config=config
#):
#    print(chunk)
#    print("----")

# Reuse the `model` created above (the original code re-instantiated an
# identical ChatOllama(model="llama3.1") here, needlessly shadowing it) and
# bind the search tools so the model can emit tool calls directly.
model_with_tools = model.bind_tools(tools=tools)

# --- direct tool-calling examples (uncomment to run) ---
#response = model_with_tools.invoke([HumanMessage(content="Hi!")])
#print(f"ContentString: {response.content}")
#print(f"ToolCalls: {response.tool_calls}")


#response = model_with_tools.invoke([HumanMessage(content="What's the weather in SF?")])
#print(f"ContentString: {response.content}")
#print(f"ToolCalls: {response.tool_calls}")


# ReAct agent WITHOUT a checkpointer: test1() below streams events without
# passing a thread_id config, which a checkpointer-backed agent would require.
# This deliberately rebinds `agent_executor` from the memory-backed one above.
agent_executor = create_react_agent(model, tools)

# --- one-shot invocation examples (uncomment to run) ---
# response = agent_executor.invoke({
#    "messages": [HumanMessage(content="hi!")]
#})
#print(response["messages"])

#response = agent_executor.invoke({
#    "messages": [HumanMessage(content="What's the weather in sf?")]
#})
#print(response["messages"])

def test1():
    """Stream the agent's events for one weather question and print them.

    Bug fix: ``astream_events`` returns an *async* generator, so the original
    plain ``for`` loop raised ``TypeError: 'async_generator' object is not
    iterable``. The iteration is now wrapped in an async helper driven by
    ``asyncio.run``, keeping ``test1()`` itself a normal synchronous call.
    """
    import asyncio  # local import keeps the module's top-level imports unchanged

    async def _consume() -> None:
        # Iterate the v1 event stream produced while the agent answers.
        async for event in agent_executor.astream_events(
            {"messages": [HumanMessage(content="What's the weather in sf")]},
            version="v1",
        ):
            kind = event["event"]
            if kind == "on_chain_start":
                if event["name"] == "Agent":
                    print(f"Start agent: {event['name']} with input: {event['data'].get('input')}")
            elif kind == "on_chain_end":
                if event["name"] == "Agent":
                    print()
                    print("--")
                    # Guard: 'output' may be absent or not a dict; the original
                    # indexed it unconditionally and could raise here.
                    output = (event["data"].get("output") or {}).get("output")
                    print(f"Done agent: {event['name']} with output: {output}")

            if kind == "on_chat_model_stream":
                # Token-by-token model output, separated by '|' markers.
                content = event["data"]["chunk"].content
                if content:
                    print(content, end="|")
            elif kind == "on_tool_start":
                print("--")
                print(f"Start tool: {event['name']} with input: {event['data'].get('input')}")
            elif kind == "on_tool_end":
                print(f"Done tool: {event['name']}")
                print(f"Tool output was: {event['data'].get('output')}")
                print("--")

    asyncio.run(_consume())

test1()

