import asyncio
from uuid import uuid4

from dotenv import load_dotenv
from langchain_core.runnables import RunnableConfig
from langgraph.types import Command, interrupt

load_dotenv()

from agents import DEFAULT_AGENT, get_agent  # noqa: E402

# Resolve the demo agent by its registered name.
# NOTE(review): "test_interupt" looks like a typo for "test_interrupt", but this
# string must match the name the agent was registered under — confirm against
# the agents module before renaming.
agent = get_agent("test_interupt")


def main() -> None:
    """Run the interrupt-demo agent to its first interrupt, then resume it.

    Streams graph updates to stdout, prints the node(s) pending at the
    interrupt point, and resumes execution with a ``{"action": "continue"}``
    command.
    """
    # Initial user message that drives the graph to its first interrupt.
    initial_input = {"messages": [{"role": "user", "content": "what's the weather in beijing?"}]}
    # Use a fresh UUID per run so checkpointed state from a previous run
    # cannot bleed into this one. (Previously a uuid4() was generated but
    # never used, and the thread_id was hard-coded to "1234".)
    thread = RunnableConfig(configurable={"thread_id": str(uuid4())})

    # Run the graph until the first interruption.
    for event in agent.stream(initial_input, thread, stream_mode="updates"):
        print(event)
        print("\n")

    print("Pending Executions!")
    # .next lists the node(s) waiting to execute at the interrupt.
    print(agent.get_state(thread).next)

    # Resume past the interrupt by supplying the value it is waiting for.
    for event in agent.stream(
        Command(resume={"action": "continue"}),
        thread,
        stream_mode="updates",
    ):
        print(event)
        print("\n")

# Script entry point: only run the demo when executed directly, not on import.
if __name__ == "__main__":
    main()
