# -*- coding: utf-8 -*-
"""
@Time    : 2024/7/18 19:15 
@Author  : ZhangShenao 
@File    : 4.自定义回调.py 
@Desc    : 通过继承BaseCallbackHandler和AsyncCallbackHandler,实现自定义的同步和异步回调策略
"""
import asyncio
import os
from typing import Optional, Union, Any, Dict, List
from uuid import UUID

import dotenv
from langchain_core.callbacks import BaseCallbackHandler, AsyncCallbackHandler
from langchain_core.messages import HumanMessage
from langchain_core.outputs import GenerationChunk, ChatGenerationChunk, LLMResult
from langchain_openai import ChatOpenAI


class FlowerShopSyncCallbackHandler(BaseCallbackHandler):
    """Custom synchronous callback handler.

    Prints each token as the streaming LLM produces it, so the
    flower-shop answer can be watched while it is being generated.
    """

    def on_llm_new_token(
            self,
            token: str,
            *,
            chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            **kwargs: Any,
    ) -> Any:
        """Invoked by LangChain every time the LLM emits a new token."""
        # Same output as an f-string; assembled via str.format instead.
        message = '获取花卉数据token: {}'.format(token)
        print(message)


class FlowerShopAsyncCallbackHandler(AsyncCallbackHandler):
    """Custom asynchronous callback handler.

    Prints progress messages around an LLM run, simulating slow
    asynchronous work (e.g. fetching flower data) with short sleeps.
    """

    @staticmethod
    async def _simulate_async_work() -> None:
        # Stand-in for real asynchronous I/O; keeps both hooks consistent.
        await asyncio.sleep(0.5)

    async def on_llm_start(
            self,
            serialized: Dict[str, Any],
            input_str: str,
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            tags: Optional[List[str]] = None,
            metadata: Optional[Dict[str, Any]] = None,
            inputs: Optional[Dict[str, Any]] = None,
            **kwargs: Any,
    ) -> None:
        """Invoked once when the LLM run starts."""
        print('正在获取花卉数据...')
        await self._simulate_async_work()
        print('花卉数据获取完毕，开始提供建议...')

    async def on_llm_end(
            self,
            response: LLMResult,
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            tags: Optional[List[str]] = None,
            **kwargs: Any,
    ) -> None:
        """Invoked once when the LLM run finishes."""
        print('开始整理花卉建议...')
        await self._simulate_async_work()
        print('花卉建议整理完成。祝您生活愉快！')


async def run():
    """Async entry point: stream one chat completion with custom callbacks."""

    # Load OPENAI_API_KEY / OPENAI_API_BASE from a local .env file.
    dotenv.load_dotenv()

    # One sync and one async handler; both receive the run's events.
    handlers = [FlowerShopSyncCallbackHandler(), FlowerShopAsyncCallbackHandler()]

    # Streaming chat model with the handlers attached directly on the
    # model via the `callbacks` constructor argument (not via config).
    llm = ChatOpenAI(
        model_name='gpt-3.5-turbo',
        max_tokens=100,
        streaming=True,  # stream tokens so on_llm_new_token fires per token
        openai_api_base=os.getenv('OPENAI_API_BASE'),
        callbacks=handlers,
    )

    # Run the model asynchronously; the handlers print progress and tokens.
    await llm.agenerate([[HumanMessage(content='哪种花卉最适合生日？只简单说3种，不超过50字')]])


if __name__ == '__main__':
    # Run the async entry point on a fresh event loop.
    asyncio.run(run())
