import asyncio
import uuid

from langchain_tavily import TavilySearch
from langgraph.checkpoint.memory import MemorySaver
from langchain.agents import create_agent

from app.chatmodel.qwen_qwq import chat_model, config


async def main():
    """Run an interactive REPL that chats with a tool-using agent.

    Builds an agent backed by a Tavily web-search tool and an in-memory
    checkpointer, then loops reading console input:
      * ``quit`` exits,
      * ``new`` starts a fresh conversation (new thread_id),
      * anything else is sent to the agent and the streamed steps are printed.

    Returns:
        None. Runs until the user types ``quit``.
    """
    # Create the agent: Tavily search tool + in-memory conversation state.
    search = TavilySearch(
        tavily_api_key=config.get("tavily_search", "api_key"),
        max_results=2,
    )
    agent_executor = create_agent(
        model=chat_model,
        tools=[search],
        checkpointer=MemorySaver(),
    )

    # Each thread_id identifies one conversation in the checkpointer;
    # a new id effectively gives the agent a blank memory.
    thread_id = str(uuid.uuid4())
    while True:
        # input() is blocking; run it in a worker thread so this coroutine
        # actually yields to the event loop instead of freezing it.
        user_input = await asyncio.to_thread(
            input, "给 Agent 发送消息（输入‘quit’退出，输入‘new'开启新对话）: "
        )
        if user_input.lower() == "quit":
            break

        if user_input.lower() == "new":
            # Fresh thread id -> the checkpointer sees an empty history.
            thread_id = str(uuid.uuid4())
            continue

        input_message = {
            "role": "user",
            "content": user_input,
        }
        # Use the async streaming API (astream) rather than the sync
        # .stream() call: inside a coroutine the sync call would block
        # the event loop for the whole model round-trip.
        async for step in agent_executor.astream(
            {"messages": [input_message]},
            {"configurable": {"thread_id": thread_id}},
            stream_mode="values",
        ):
            step["messages"][-1].pretty_print()


# Script entry point: start the interactive agent chat loop.
if __name__ == "__main__":
    asyncio.run(main())
