import asyncio
from typing import Any, Dict, List
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.messages import HumanMessage
from langchain_core.outputs import LLMResult
from langchain_openai import ChatOpenAI
from openaiConfigurations import openai_api_key, openai_api_base

# class MyCustomsyncHandler(BaseCallbackHandler):
#     def on_llm_new_token(self, token: str, **kwargs) -> None:
#         print(f"正在thread_pool_executor中调用同步处理程序：token: {token}")

# class MyCustomAsyncHandler(AsyncCallbackHandler):
#     """处理来自langchain的异步回调处理程序"""
#     async def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
#         "链开始时运行"
#         print("zzzz....")
#         await asyncio.sleep(0.3)
#         # 拿到模型名称
#         class_name = serialized.get("name")
#         print("llm正在启动")

#     async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
#         "链结束时运行"
#         print("zzzz....")
#         await asyncio.sleep(0.3)
#         print("llm结束")

# To enable streaming, pass streaming=True to the ChatModel constructor;
# a list of custom callback handlers can also be supplied via `callbacks`.
chat = ChatOpenAI(
    openai_api_key=openai_api_key,
    openai_api_base=openai_api_base,
    temperature=0,
    streaming=True,
    # callbacks=[MyCustomsyncHandler(), MyCustomAsyncHandler()]
)

# async def main():
#     # 返回一个LLMResult对象,该对象有generations列表和llm_output字典
#     # llm_output：与LLM输出相关的额外信息，如令牌使用情况、模型名称等。
#     # generations：一个嵌套列表，每个内部列表对应一个输入的对话历史，
#     #   内部列表中的元素是Generation对象，包含了生成的文本和其他相关信息。
#     answer = await chat.agenerate([[HumanMessage(content = "告诉我北京的特产")]])
#     print(answer)

# asyncio.run(main())

# 更直观的可以参考下面的代码，理解异步请求如何减少网络io时间
class MyCustomAsyncHandler(AsyncCallbackHandler):
    """Async callback handler that logs the LLM request lifecycle."""

    async def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        """Called when the LLM starts handling a request; logs the prompts."""
        print(f"LLM 开始处理请求: {prompts}")

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Called when the LLM finishes; prints every generated text.

        ``response.generations`` is a nested list: one sublist per input
        prompt, each containing Generation objects.
        """
        print("LLM 结束处理请求")
        # Iterate ALL sublists, not just generations[0] — otherwise output is
        # silently dropped when a single agenerate() call carries several prompts.
        for prompt_generations in response.generations:
            for generation in prompt_generations:
                print(f"生成的文本: {generation.text}")

async def make_request(question: str):
    """Send a single question to the chat model asynchronously.

    A fresh MyCustomAsyncHandler is attached so the start/end of this
    particular request gets logged.
    """
    handler = MyCustomAsyncHandler()
    return await chat.agenerate(
        [[HumanMessage(content=question)]],
        callbacks=[handler],
    )

async def main():
    """Fire several questions at the model concurrently and return the results.

    asyncio.gather overlaps the network waits, so total latency is roughly
    that of the slowest single request rather than the sum of all of them.
    """
    questions = (
        "北京有哪些著名的景点？",
        "上海有什么特色美食？",
        "广州的交通状况如何？",
    )
    return await asyncio.gather(*(make_request(q) for q in questions))

if __name__ == "__main__":
    import time

    # Use perf_counter for elapsed-time measurement: it is monotonic and
    # high-resolution, whereas time.time() is wall-clock and can jump
    # (NTP adjustment, DST) mid-run, yielding a wrong or negative duration.
    start_time = time.perf_counter()
    asyncio.run(main())
    end_time = time.perf_counter()
    print(f"总耗时: {end_time - start_time} 秒")