from typing import Dict, Any, List
import os
from langchain_community.chat_models import ChatTongyi
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate

from langChain.config import model, embedding


# 回调可以配置在请求方法（invoke等方法）中，也可以配置在LLM实例中。

# # 定义一个日志处理器类，继承自BaseCallbackHandler
# class LoggingHandler(BaseCallbackHandler):
#     # 当聊天模型开始时调用的方法
#     def on_chat_model_start(
#             self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs
#     ) -> None:
#         print("===> Chat model started")  # log that the chat model has started
#
#     # 当LLM结束时调用的方法
#     def on_llm_end(self, response: LLMResult, **kwargs) -> None:
#         print(f"===> Chat model ended, response: {response}")  # 打印“Chat model ended, response: {response}”
#
#     # 当链开始时调用的方法
#     def on_chain_start(
#             self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs
#     ) -> None:
#         print(f"===> Chain started, inputs:{inputs}")  # log the chain start together with its inputs
#
#     # 当链结束时调用的方法
#     def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:
#         print(f"===> Chain ended, outputs: {outputs}")  # 打印“Chain ended, outputs: {outputs}”
#
#
# # 创建一个包含LoggingHandler实例的回调列表
# callbacks = [LoggingHandler()]
#
# # 创建一个聊天提示模板，模板内容为“What is 1 + {number}?”
# prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")
#
# # 将提示模板和LLM组合成一个链
# chain = prompt | model
# # 1. 配置在请求方法（invoke等方法）中
# # 调用链的invoke方法，传入参数number为"2"，并配置回调
# chain.invoke({"number": "2"}, config={"callbacks": callbacks})


class MyCustomHandler(BaseCallbackHandler):
    """Callback handler that logs each token the LLM streams back."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Fired once per generated token when streaming is enabled.
        message = f"===> My custom handler, token: {token}"
        print(message)


# Approach 2: attach the callback handler to the LLM instance itself,
# so it fires for every request made through this model.
# NOTE(review): this rebinds the name `model` imported from langChain.config
# at the top of the file — confirm the shadowing is intentional.
model = ChatTongyi(
    model_name="qwen-max",  # alternatives: qwen-plus, qwen-omni-turbo, qwen2.5-omni-7b
    streaming=True,  # stream tokens incrementally so on_llm_new_token is invoked
    temperature=0.7,  # controls generation diversity
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    callbacks=[MyCustomHandler()],
)

prompt = ChatPromptTemplate.from_messages(["给我讲个关于{animal}的笑话，限制20个字"])
chain = prompt | model

cat_joke = chain.invoke({"animal": "猫"})
print('猫的笑话 ： ', cat_joke.content)

dog_joke = chain.invoke({"animal": "狗"})
print('狗的笑话 ： ', dog_joke.content)
