# -*- coding: utf-8 -*-
"""
@Time    : 2024/6/29 10:15 
@Author  : ZhangShenao 
@File    : llm_ops_callback_handler.py 
@Desc    : 自定义回调处理器
"""
import datetime
import time
from typing import Dict, Any, List, Optional, Union
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import GenerationChunk, ChatGenerationChunk, LLMResult


class LLMOpsCallbackHandler(BaseCallbackHandler):
    """Custom callback handler extending ``BaseCallbackHandler``.

    Logs the LLM run lifecycle to stdout: serialized model info and prompts
    on start, the response and total elapsed time on end, and each streamed
    token as it is generated.
    """

    def __init__(self) -> None:
        super().__init__()
        # Start time of the current run (monotonic seconds);
        # None until on_llm_start has fired.
        self._start_time: Optional[float] = None

    def on_llm_start(self,
                     serialized: Dict[str, Any],
                     prompts: List[str],
                     **kwargs: Any) -> None:
        """Callback fired when an LLM run starts.

        :param serialized: serialized representation of the LLM being invoked
        :param prompts: the list of prompt strings sent to the LLM
        """
        # Print LLM invocation info.
        print('llm run start.')
        print(f'serialized: {serialized}')
        print(f'prompts: {prompts}')

        # Record the start time with a monotonic clock so the measured
        # duration is immune to system wall-clock adjustments.
        self._start_time = time.monotonic()

    def on_llm_end(self,
                   response: LLMResult,
                   **kwargs: Any) -> None:
        """Callback fired when an LLM run ends.

        :param response: the ``LLMResult`` produced by the run
            (LangChain passes an LLMResult here, not a plain str)
        """
        # Print the LLM run result.
        print('llm run end.')
        print(f'response: {response}')

        # Print the total elapsed time of the run. Guard against
        # on_llm_end firing without a matching on_llm_start, which
        # previously raised AttributeError on the unset start time.
        if self._start_time is not None:
            time_costs = round(time.monotonic() - self._start_time, 2)
            print(f'llm total costs: {time_costs}s')

    def on_llm_new_token(
            self,
            token: str,
            *,
            chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            **kwargs: Any,
    ) -> Any:
        """Callback fired for each newly generated token (streaming mode).

        :param token: the newly generated token text
        :param chunk: the generation chunk the token belongs to, if any
        :param run_id: unique id of the current run
        :param parent_run_id: id of the parent run, if nested
        """
        print(f'generate new token: {token}\n')
