'''
* This is the project for Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description Demonstrates custom LangChain callback handlers for telemetry reporting.
* @File: 8_study_use_callback_handler.py
* @Time: 2025/7/13
* @All Rights Reserved By Brtc
'''
import time
from typing import Any, Optional
from uuid import UUID

import dotenv
from langchain_community.chat_models import ChatOpenAI
from langchain_core.callbacks import BaseCallbackHandler, StdOutCallbackHandler
from langchain_core.output_parsers import StrOutputParser
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

# Implement telemetry/metrics reporting via callback hooks
class LLMOpsCallbackHandler(BaseCallbackHandler):
    """Callback handler that measures the wall-clock duration of an LLM call.

    ``on_llm_start`` stamps the start time; ``on_llm_end`` prints the end
    timestamp and the elapsed seconds. The ``print`` calls mark the spots
    where metrics could instead be shipped to a monitoring backend.
    """

    # Class-level default; each instance overwrites it when on_llm_start fires.
    start_at: float = 0.0

    def on_llm_start(
        self,
        serialized: dict[str, Any],
        prompts: list[str],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Log the serialized model/prompts and record the start timestamp."""
        # These prints could be replaced by a network report to a metrics service.
        print("聊天模型开始了：")
        print(f"LLM serialized: {serialized}")
        print(f"LLM prompts: {prompts}")
        self.start_at = time.time()

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Print the completion time and elapsed duration of the LLM call.

        NOTE(review): if on_llm_start never fired, start_at is still 0.0 and
        the elapsed value is meaningless — confirm the hook ordering upstream.
        """
        finished_at = time.time()
        elapsed = finished_at - self.start_at
        # Candidates for reporting to a monitoring backend as well.
        print(f"\n完成输出时间：{finished_at}")
        print(f"\n整个输出耗时:{elapsed}")

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file.
dotenv.load_dotenv()

# Build the chain: wrap the raw input as {"query": ...}, render the prompt,
# call the chat model, then parse the AI message into a plain string.
prompt = ChatPromptTemplate.from_template("{query}")
llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
parser = StrOutputParser()
chain = {"query": RunnablePassthrough()} | prompt | llm | parser

# Run the chain in streaming mode, attaching both the stdout callback and
# our custom telemetry callback.
rsp = chain.stream(
    "请介绍一下langchain  400 字左右",
    config={"callbacks": [StdOutCallbackHandler(), LLMOpsCallbackHandler()]},
)

# stream() returns an iterator of chunks (not a list), so consume it lazily
# and print each chunk as it arrives for incremental output.
for chunk in rsp:
    print(chunk, end="", flush=True)
