from time import time
from typing import Any, Optional
from uuid import UUID

import dotenv
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult


# Load environment variables from a local .env file (e.g. model API keys)
# as a module-import side effect, before any LLM client is configured.
dotenv.load_dotenv()

class LlmCallbackHandler(BaseCallbackHandler):
    """Callback handler that logs chat-model start/end events and prints the
    wall-clock duration of each LLM run.

    Start timestamps are keyed by ``run_id`` so that concurrent or
    interleaved runs do not overwrite each other's start time (a single
    shared attribute would misreport durations whenever two runs overlap).
    """

    def __init__(self) -> None:
        super().__init__()
        # Kept for backward compatibility with code that reads this attribute:
        # timestamp of the most recently started run.
        self.start_at: float = 0
        # Per-run start timestamps: run_id -> time(); lets overlapping runs
        # be timed independently.
        self._start_times: dict[UUID, float] = {}

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Log the serialized model config and prompt messages, and record
        this run's start time under its ``run_id``."""
        print("聊天模型开始了.....")
        print("serialized:", serialized)
        print("messages:", messages)
        now = time()
        self.start_at = now
        self._start_times[run_id] = now

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Log the model response and the elapsed time for this run."""
        end_at: float = time()
        print("聊天模型结束了.....")
        print("response", response)
        # Pop this run's start time; fall back to the shared timestamp if no
        # matching start callback was observed for this run_id.
        start_at = self._start_times.pop(run_id, self.start_at)
        print("总耗时:", end_at - start_at)
