"""
自定义回调处理器
"""
import asyncio
from pprint import pprint
from typing import Optional, Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from models import get_ds_model_client

class AsyncCustomLoggingHandler(AsyncCallbackHandler):
    """Async callback handler that logs LLM start/end events with a short async delay."""
    # Tip: press Alt+Insert in the IDE to quickly generate override method stubs.

    async def on_llm_start(self, serialized: dict[str, Any], prompts: list[str], *, run_id: UUID,
                           parent_run_id: Optional[UUID] = None, tags: Optional[list[str]] = None,
                           metadata: Optional[dict[str, Any]] = None, **kwargs: Any) -> None:
        """Run when the LLM starts running."""
        print("zzzz....")
        # Simulate slow async side work; awaiting keeps the event loop unblocked.
        await asyncio.sleep(0.3)
        print("Hi! I just woke up. Your llm is starting")

    async def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id: Optional[UUID] = None,
                         tags: Optional[list[str]] = None, **kwargs: Any) -> None:
        """Run when the LLM finishes running."""
        print("zzzz....")
        await asyncio.sleep(0.3)
        print("Hi! I just woke up. Your llm is ending")

# Shared callback list attached to the LLM below; logs each LLM start/end event.
callbacks = [AsyncCustomLoggingHandler()]

async def call_llm():
    """Build a prompt|LLM chain, invoke it asynchronously, and pretty-print the result."""
    llm = get_ds_model_client()
    template = ChatPromptTemplate.from_template("What is 1 + {number}?")
    pipeline = template | llm.with_config(callbacks=callbacks)
    answer = await pipeline.ainvoke({"number": "2"})
    pprint(answer)
    return answer

# Guard the entry point so importing this module does not trigger an LLM call.
if __name__ == "__main__":
    asyncio.run(call_llm())