from langgraph.graph import MessagesState
from typing import Any, Dict, Iterator, List, Optional
from langgraph.graph import StateGraph
from langchain_ollama import ChatOllama
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
)
from langchain_core.outputs import (
    ChatGeneration, 
    ChatGenerationChunk, 
    ChatResult
)
from langgraph.graph import START, END
import json

# Local Ollama-served model; reasoning traces disabled, moderate sampling temperature.
llm = ChatOllama(
    base_url="http://127.0.0.1:11434/",
    model="qwen3:8b",
    reasoning=False,
    temperature=0.5,
)

def call_llm(state: MessagesState) -> Dict[str, List[BaseMessage]]:
    """Invoke the LLM on the accumulated conversation.

    Args:
        state: Current graph state; ``state["messages"]`` holds the
            conversation history.

    Returns:
        A partial state update whose ``messages`` list contains only the
        model's reply. LangGraph's ``MessagesState`` reducer appends it
        to the existing history rather than replacing it.
    """
    response = llm.invoke(state["messages"])
    return {"messages": [response]}

# Minimal one-node graph: START -> call_llm -> END.
_builder = StateGraph(MessagesState)
_builder.add_node(call_llm)
_builder.add_edge(START, "call_llm")
_builder.add_edge("call_llm", END)
streamGraph = _builder.compile(name="streaming-test")

class MessageEncoder(json.JSONEncoder):
    """JSON encoder that can serialize LangChain ``BaseMessage`` objects."""

    def default(self, obj):
        """Convert a ``BaseMessage`` into a JSON-serializable dict.

        Any other non-serializable object falls through to the base
        class, which raises ``TypeError``.
        """
        if isinstance(obj, BaseMessage):
            # Flatten the message object into plain primitives.
            # (Removed a leftover debug print(obj) that polluted output
            # on every message serialization.)
            return {
                'id': obj.id,
                'type': obj.type,
                'content': obj.content,
                'additional_kwargs': obj.additional_kwargs
            }
        return super().default(obj)

# Drive the graph and pretty-print every emitted state snapshot.
# ("values" mode yields the full state after each node finishes.)
initial_state = {"messages": [HumanMessage(content="tell me a joke about cat")]}
for snapshot in streamGraph.stream(
    initial_state,
    stream_mode="values",
):
    print("-------------------------------------------")
    print(json.dumps(snapshot, indent=2, ensure_ascii=False, cls=MessageEncoder))
