import asyncio
from typing import Any, Dict, List, Union

from langchain.schema import LLMResult, HumanMessage
from langchain_core.messages import BaseMessage
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain.schema import AgentAction
import os, json

class MyCustomHandlerOne(BaseCallbackHandler):
    """Synchronous handler that prints every major callback event.

    When attached at run time (e.g. via ``callbacks=`` on the agent), it
    traces the full execution sequence: LLM start/streamed tokens/errors,
    chain starts, tool starts and agent actions.
    """

    @staticmethod
    def _component_name(serialized: Dict[str, Any]) -> str:
        """Best-effort component name.

        `serialized` may omit "name" (newer LangChain versions store the
        class path as a list under "id"), so fall back gracefully instead
        of raising KeyError.
        """
        return serialized.get("name", serialized.get("id", ["<unknown>"])[-1])

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        """Called when an LLM starts running."""
        print(f"on_llm_start {self._component_name(serialized)}, prompts: {prompts}")

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        """Called for each new token while the LLM streams output."""
        print(f"on_new_token {token}")

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Called when the LLM raises an error."""
        print(f"on_llm_error {error}")

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> Any:
        """Called when a chain starts running."""
        print(f"on_chain_start {self._component_name(serialized)}, inputs: {inputs}")

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Called when a tool starts running."""
        print(f"on_tool_start {self._component_name(serialized)}, input_str: {input_str}")

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Called when the agent decides on an action."""
        print(f"on_agent_action {action}")


class MyCustomHandlerTwo(BaseCallbackHandler):
    """Secondary handler used to demonstrate per-object callback scoping.

    In the test functions below it is attached only to the LLM instance
    (constructor ``callbacks=``), so it fires for LLM starts alone —
    unlike a handler passed per-run, which fires for every component.
    """

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        """Called when the LLM this handler is bound to starts running."""
        # `serialized` may omit "name" (newer LangChain versions store the
        # class path as a list under "id"), so fall back gracefully.
        name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
        print("\n\n")
        print(f"on_llm_start (CallbackHandler2) {name}.................................")

class MyCustomAsyncHandler(AsyncCallbackHandler):
    """Async callback handler for LangChain events.

    Every hook awaits a short sleep to demonstrate that async handlers can
    yield control to the event loop, then prints the event payload.
    """

    @staticmethod
    def _component_name(serialized: Dict[str, Any]) -> str:
        """Best-effort component name.

        `serialized` may omit "name" (newer LangChain versions store the
        class path as a list under "id"), so fall back gracefully instead
        of raising KeyError.
        """
        return serialized.get("name", serialized.get("id", ["<unknown>"])[-1])

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Called when the LLM starts running."""
        print("zzzz....")
        await asyncio.sleep(0.3)
        print(f"开始访问LLM, prompts[{len(prompts)}]")
        # Print each prompt individually.
        for i, prompt in enumerate(prompts):
            print(f"prompt[{i}]: {prompt}")

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Called when the LLM finishes running."""
        print("zzzz....")
        await asyncio.sleep(0.3)
        print(f"结束访问LLM, response: {type(response)}, response.generations[{len(response.generations)}]")
        # `response.generations` is a list of generation lists, one per
        # prompt. The original iterated only generations[0], dropping the
        # results of every prompt after the first — flatten and print all.
        flattened = (gen for gens in response.generations for gen in gens)
        for i, generation in enumerate(flattened):
            print(f"generation[{i}]")
            print(f"text: {generation.text}")
            json_str = json.dumps(generation.generation_info, indent=4, ensure_ascii=False)
            print(f"generation_info: {json_str}")

    async def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Called when the LLM raises an error."""
        print(f"\non_llm_error {error}")

    async def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> Any:
        """Called when a chain starts running."""
        # Prefer "name", then "type", otherwise dump the whole payload —
        # the serialized dict's keys vary across LangChain components.
        if "name" in serialized:
            print(f"\non_chain_start {serialized['name']}, inputs: {inputs}")
        elif "type" in serialized:
            print(f"\non_chain_start type: {serialized['type']}, inputs: {inputs}")
        else:
            print(f"\non_chain_start serialized: {serialized}, inputs: {inputs}")

    async def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Called when a tool starts running."""
        print(f"\non_tool_start {self._component_name(serialized)}, input_str: {input_str}")

    async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Called when the agent decides on an action."""
        print(f"\non_agent_action {action}")

    async def on_chat_model_start(
        self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any
    ) -> None:
        """Called when a chat model starts running."""
        print(f"\non_chat_model_start {self._component_name(serialized)}, messages: {messages}")

def test_async_callback_handler_openai():
    """Run a math agent against OpenAI while tracing execution via callbacks.

    Demonstrates callback scoping:
    - ``handler2`` is bound to the LLM instance at construction time, so it
      fires only for that LLM's events.
    - ``handler3`` is passed per-run, so its callbacks are emitted by every
      object taking part in the agent execution (llm, llmchain, tool,
      agent executor).
    """
    from langchain_openai import OpenAI
    from langchain.agents import AgentType, initialize_agent
    from langchain_community.agent_toolkits.load_tools import load_tools

    # Instantiate the handlers.
    handler2 = MyCustomHandlerTwo()
    handler3 = MyCustomAsyncHandler()

    # Set up the agent. Only `llm` will emit callbacks for handler2.
    llm = OpenAI(temperature=0, streaming=True, callbacks=[handler2])
    tools = load_tools(["llm-math"], llm=llm)
    agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

    result = agent.run("What is 2 to the power of 0.235?", callbacks=[handler3])
    print(result)

def test_async_callback_handler_tongyi():
    """Run a math agent against Tongyi while tracing execution via callbacks.

    Demonstrates callback scoping:
    - ``handler2`` is bound to the LLM instance at construction time, so it
      fires only for that LLM's events.
    - ``handler3`` is passed per-run, so its callbacks are emitted by every
      object taking part in the agent execution (llm, llmchain, tool,
      agent executor).
    """
    from langchain_community.llms.tongyi import Tongyi
    from langchain.agents import AgentType, initialize_agent
    from langchain_community.agent_toolkits.load_tools import load_tools

    # Instantiate the handlers.
    handler2 = MyCustomHandlerTwo()
    handler3 = MyCustomAsyncHandler()

    # Set up the agent. Only `llm` will emit callbacks for handler2.
    llm = Tongyi(temperature=0, streaming=True, callbacks=[handler2])
    tools = load_tools(["llm-math"], llm=llm)
    agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

    result = agent.run("2的0.235次方是多少？保留4位小数", callbacks=[handler3])
    print(f"最终结果: {result}")

if __name__ == "__main__":
    # Load API keys (DASHSCOPE_API_KEY etc.) from a local .env file
    # before touching any LLM provider.
    from dotenv import load_dotenv

    load_dotenv()
    test_async_callback_handler_tongyi()