"""
自定义回调处理器
"""
from pprint import pprint
from typing import Optional, Any
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from models import get_ds_model_client

class CustomLoggingHandler(BaseCallbackHandler):
    """Minimal logging callback: prints a line at each chat-model/chain lifecycle event.

    Only four hooks are overridden; every other BaseCallbackHandler event
    falls through to the no-op defaults.
    """

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Fired when a chat model begins processing a batch of messages."""
        print("Chat model started")

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Fired when the LLM finishes; dumps the full result object."""
        print(f"Chat model ended, response: {response}")

    def on_chain_start(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Fired when any runnable chain starts; logs its serialized name."""
        print(f"Chain {serialized.get('name')} started")

    def on_chain_end(
        self,
        outputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Fired when a chain completes; logs its output mapping."""
        print(f"Chain ended, outputs: {outputs}")

# Register the custom handler so each lifecycle event above gets printed.
handler = CustomLoggingHandler()
callbacks = [handler]

llm = get_ds_model_client()
prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")
chain = prompt | llm

# Option 1: pass callbacks through the RunnableConfig at invoke time.
result = chain.invoke({"number": "2"}, config={"callbacks": callbacks})

# Option 2: bind callbacks onto the runnable with with_config().
# chain = prompt | llm.with_config(callbacks=callbacks)
# result = chain.invoke({"number": "2"})

pprint(result)
