#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""

@Time   :2025/8/10 上午11:53
@Author :zengjiahao1989@gmail.com
@File   :1.回调功能使用.py
"""
import os
import time
from typing import Any, Optional
from uuid import UUID

import dotenv
from langchain_core.callbacks import StdOutCallbackHandler, BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

# Load environment variables (OPENAI_API_KEY / OPENAI_API_BASE) from a local .env file.
dotenv.load_dotenv()


class LLMOpsCallbackHandler(BaseCallbackHandler):
    """Custom LLMOps callback handler.

    Stamps the wall-clock time when a chat-model run starts and, when the
    run finishes, prints the full LLM response together with the elapsed
    time for the run.
    """
    # time.time() captured in on_chat_model_start; stays 0 until a run begins.
    start_at: float = 0

    def on_chat_model_start(
            self,
            serialized: dict[str, Any],
            messages: list[list[BaseMessage]],
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            tags: Optional[list[str]] = None,
            metadata: Optional[dict[str, Any]] = None,
            **kwargs: Any,
    ) -> Any:
        """Fired when the chat model starts.

        Logs the serialized model config and the input messages, then records
        the start timestamp used later by ``on_llm_end``.
        """
        print("聊天模型开始执行")
        print('serialized', serialized)
        print('messages', messages)
        self.start_at = time.time()

    def on_llm_end(
            self,
            response: LLMResult,
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            **kwargs: Any,
    ) -> Any:
        """Fired when the LLM run ends.

        Prints the complete ``LLMResult`` and the elapsed time since
        ``on_chat_model_start`` stamped ``self.start_at``.
        """
        end_at: float = time.time()
        print("完整输出", response)
        print('程序消耗', end_at - self.start_at)


# 1. Prompt: forward the user's query straight into the template.
prompt = ChatPromptTemplate.from_template("{query}")

# 2. Chat model, configured entirely from environment variables.
llm = ChatOpenAI(
    model_name="kimi-k2-0711-preview",
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    openai_api_base=os.getenv("OPENAI_API_BASE"),
)

# 3. Chain: map the raw input onto "query", render the prompt, invoke the
#    model, then parse the reply down to a plain string.
chain = (
    {"query": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# 4. Run the chain in streaming mode. (chain.invoke(...) would instead return
#    the complete generated output in one piece, with no per-token streaming.)
resp = chain.stream(
    "你好,你是谁？",
    config={"callbacks": [StdOutCallbackHandler(), LLMOpsCallbackHandler()]}
)

# Drain the generator so the run (and its callbacks) actually executes; the
# visible output comes from the callback handlers' print statements.
for chunk in resp:
    pass
