
from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, MessagesState, START, END

from run_custom_chat_model import ChatParrotLink
# Module-level model instance shared by every node invocation.
# NOTE(review): parrot_buffer_length=3 presumably controls how much of the
# input the parrot model echoes back — confirm in run_custom_chat_model.
llm = ChatParrotLink(parrot_buffer_length=3, model="my_custom_model")

def call_model(state: MessagesState) -> dict:
    """Invoke the parrot LLM on the conversation history.

    Args:
        state: Current graph state; ``state["messages"]`` holds the chat
            history accumulated so far.

    Returns:
        A partial state update containing the model's reply; the
        ``MessagesState`` reducer appends it to ``messages``.
    """
    # Feed the full history to the model and wrap the reply for the reducer.
    response = llm.invoke(state["messages"])
    return {"messages": [response]}

# Build a single-node graph: START -> call_model -> END.
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
for source, target in ((START, "call_model"), ("call_model", END)):
    builder.add_edge(source, target)
graph = builder.compile()

print("+++++++++++++++++++++++ non-streaming +++++++++++++++++++++++++")
# non-streaming: run the graph to completion and print the final state
user_message = {"role": "user", "content": "what's the weather in sf?"}
run_config = {"configurable": {"user_id": "user_7798"}}
result = graph.invoke(input={"messages": [user_message]}, config=run_config)
print(result)

print("+++++++++++++++++++++++ streaming +++++++++++++++++++++++++")
# streaming: stream_mode="messages" yields (message_chunk, metadata) tuples
# as tokens are produced, so unpack instead of indexing an opaque event.
for message_chunk, metadata in graph.stream(
    {"messages": [{"role": "user", "content": "what's the weather in sf?"}]},
    stream_mode="messages",
):
    print("-------------------------------------------")
    print(message_chunk.content)
