"""
@Author: Bright
@File: callback_run.py
@Time: 2025/9/29
@Desc: 【openai的调用开始，结束时】 封装一个日志处理类，继承BaseCallbackHandler
"""
import os
from typing import Any, Optional
from uuid import UUID

from dotenv import load_dotenv
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_openai import AzureChatOpenAI


class LoggingHandler(BaseCallbackHandler):
    """Callback handler that prints a trace of a LangChain run.

    Emits a short console message at four lifecycle points: when a chat
    model starts, when the LLM finishes, and when a chain starts or ends.
    Attach it via ``RunnableConfig(callbacks=[LoggingHandler()])``.
    """

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Announce that a chat-model invocation has begun."""
        print("chat model start")

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Print the full LLM result once generation completes."""
        print(f"llm end response:{response}")

    def on_chain_start(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Print the inputs the chain was invoked with."""
        print(f"chain start<=== inputs:{inputs}")

    def on_chain_end(
        self,
        outputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Print the outputs the chain produced."""
        print(f"chain end===> outputs:{outputs}")


# Callback list holding a single LoggingHandler instance; passed to the
# chain at invocation time so run lifecycle events get printed.
callbacks = [LoggingHandler()]

# Load Azure OpenAI settings from the environment (.env) and build the
# chat client from them. NOTE(review): the model used is whatever the
# AZURE_OPENAI_DEPLOYMENT_NAME deployment points at — not necessarily gpt-4.
load_dotenv()
llm = AzureChatOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    temperature=0.7
)

# Chat prompt template with a single {number} placeholder:
# "what is 1 + {number}?"
prompt = ChatPromptTemplate.from_template("what is 1 + {number}?")

# Compose the prompt and the model into a runnable chain (LCEL pipe).
chain = prompt | llm

# Invoke the chain with number=2; the handler's on_chain_start,
# on_chat_model_start, on_llm_end and on_chain_end fire during the run.
result = chain.invoke({"number": 2}, config=RunnableConfig(callbacks=callbacks))
print("---" * 50)
# result is the chain's output message (same object on_chain_end receives
# as `outputs`); .content is the model's text reply.
print("返回result和outputs是一样。", result.content)
