from dotenv import load_dotenv, find_dotenv
from langchain_community.chat_models import ChatZhipuAI
from langchain_core.messages import  HumanMessage
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.callbacks.streaming_stdout import (
    StreamingStdOutCallbackHandler
)

# Load environment variables (e.g. the ZhipuAI API key) from the nearest .env file.
_ = load_dotenv(find_dotenv())

class CustomCallbackHandler1(BaseCallbackHandler):
    """Demo callback that logs LLM start/end lifecycle events to stdout."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Invoked by the framework when the LLM call begins."""
        marker = ">> LLM started in CustomCallbackHandler1"
        print(marker)

    def on_llm_end(self, response, **kwargs):
        """Invoked by the framework when the LLM call finishes."""
        marker = ">> LLM ended in CustomCallbackHandler1"
        print(marker)

class CustomCallbackHandler2(BaseCallbackHandler):
    """Second demo callback; shows that multiple handlers fire for one run."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Invoked by the framework when the LLM call begins."""
        marker = ">> LLM started in CustomCallbackHandler2"
        print(marker)

    def on_llm_end(self, response, **kwargs):
        """Invoked by the framework when the LLM call finishes."""
        marker = ">> LLM ended in CustomCallbackHandler2"
        print(marker)

# Both custom handlers plus the built-in handler that streams tokens to stdout.
handlers = [
    CustomCallbackHandler1(),
    CustomCallbackHandler2(),
    StreamingStdOutCallbackHandler(),
]

# ZhipuAI chat model configured to emit tokens incrementally as they arrive.
streaming_chat = ChatZhipuAI(model="glm-4-plus", temperature=0.9, streaming=True)

messages = [HumanMessage(content="请写一句关于春天的诗。")]

# Run the model once; every handler in `handlers` receives the callback events.
streaming_chat.invoke(messages, config={"callbacks": handlers})