import time
from typing import Dict, Any, List, Optional
from uuid import UUID

import dotenv
from langchain_community.chat_models import ChatOpenAI
from langchain_core.callbacks import BaseCallbackHandler, StdOutCallbackHandler
from langchain_core.output_parsers import StrOutputParser
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough


class LLMOpsCallbackHandler(BaseCallbackHandler):
    """Callback handler that measures the wall-clock duration of an LLM call.

    Records a timestamp when the model starts (``on_llm_start``) and prints
    both the finish timestamp and the elapsed time when it ends
    (``on_llm_end``).
    """

    def __init__(self) -> None:
        super().__init__()
        # Per-instance start timestamp; 0.0 until on_llm_start fires.
        # (Was previously a class attribute, i.e. state shared across
        # all instances until first write — moved here to avoid that.)
        self.start_at: float = 0.0

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Log the serialized model info and prompts, and record the start time.

        Args:
            serialized: Serialized representation of the LLM being invoked.
            prompts: The prompt strings sent to the model.
            run_id: Unique id of this run (unused here).
            parent_run_id: Id of the parent run, if any (unused here).
            tags: Optional run tags (unused here).
            metadata: Optional run metadata (unused here).
        """
        print("聊天模型开始了：")
        print(f"LLM serialized: {serialized}")
        print(f"LLM prompts: {prompts}")
        # time.time() (wall clock) is used because the end handler also
        # prints the raw finish timestamp, not just the delta.
        self.start_at = time.time()

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Print the finish timestamp and the total elapsed time.

        Args:
            response: The full LLM result (unused here; only timing is logged).
            run_id: Unique id of this run (unused here).
            parent_run_id: Id of the parent run, if any (unused here).
        """
        end_at = time.time()
        print(f"\n完成输出时间：{end_at}")
        print(f"\n完整输出时间：{end_at - self.start_at}")

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file.
dotenv.load_dotenv()

# Components: prompt passes the query through verbatim; parser extracts
# the message content as a plain string. (Fixed typo: paraser -> parser.)
prompt = ChatPromptTemplate.from_template("{query}")
llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
parser = StrOutputParser()

# Assemble the chain with LCEL pipe syntax: the raw input string is fed
# into the prompt's {query} slot via RunnablePassthrough.
chain = {"query": RunnablePassthrough()} | prompt | llm | parser

# Run the chain in streaming mode; both the stdout handler and the custom
# timing handler receive the callback events.
rsp = chain.stream(
    "请 介绍一下langchain 400字左右",
    config={"callbacks": [StdOutCallbackHandler(), LLMOpsCallbackHandler()]},
)

# rsp is a generator of chunks (NOT a list — stream() yields lazily),
# so print each chunk as it arrives.
for chunk in rsp:
    print(chunk, end="", flush=True)