import threading
import time

import edge_tts
import pygame
from io import BytesIO
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import Handoff as HandoffBase,Response
from autogen_agentchat.messages import StructuredMessageFactory, BaseChatMessage, TextMessage, BaseAgentEvent
from autogen_core import CancellationToken, Component, ComponentModel, FunctionCall
from autogen_core.memory import Memory
from autogen_core.model_context import ChatCompletionContext, UnboundedChatCompletionContext
from autogen_core.models import ChatCompletionClient, FunctionExecutionResult, SystemMessage, ModelFamily
from autogen_core.tools import BaseTool, Workbench, FunctionTool, StaticWorkbench
from pydantic import BaseModel
import asyncio
import json
import logging
import warnings
from typing import (
    Any,
    AsyncGenerator,
    Awaitable,
    Callable,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Union,
)

class BaseAssistantAgentConfig(BaseModel):
    """Declarative configuration schema for :class:`BaseAssistantAgent`.

    Mirrors the agent's constructor arguments using serializable
    ``ComponentModel`` references, so an agent instance can be dumped to
    and rebuilt from configuration.
    """

    # Unique agent name.
    name: str
    # Serialized chat-completion client component.
    model_client: ComponentModel
    # Optional serialized tool components.
    tools: List[ComponentModel] | None = None
    # Optional serialized workbench components.
    workbench: List[ComponentModel] | None = None
    # Handoff targets, either full Handoff configs or plain agent names.
    handoffs: List[HandoffBase | str] | None = None
    # Serialized chat-completion context component.
    model_context: ComponentModel | None = None
    # Optional serialized memory components.
    memory: List[ComponentModel] | None = None
    # Human-readable agent description.
    description: str
    system_message: str | None = None
    # Whether the model client should stream tokens.
    model_client_stream: bool = False
    # Whether to run a reflection step after tool calls (required field).
    reflect_on_tool_use: bool
    # Format string applied to tool-call results (required field).
    tool_call_summary_format: str
    metadata: Dict[str, str] | None = None
    # Serialized factory for structured output messages.
    structured_message_factory: ComponentModel | None = None


class BaseAssistantAgent(AssistantAgent, Component[BaseAssistantAgentConfig]):
    """An :class:`AssistantAgent` that speaks its final text response aloud.

    The agent behaves exactly like ``AssistantAgent`` for message handling,
    but when the final ``Response`` carries a :class:`TextMessage`, the text
    is synthesized with ``edge-tts`` and played back through ``pygame``
    before the response is returned to the caller.
    """

    component_version = 2
    component_config_schema = BaseAssistantAgentConfig
    component_provider_override = "autogen_agentchat.agents.AssistantAgent"

    def __init__(
            self,
            name: str,
            model_client: ChatCompletionClient,
            *,
            tools: List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
            workbench: Workbench | Sequence[Workbench] | None = None,
            handoffs: List[HandoffBase | str] | None = None,
            model_context: ChatCompletionContext | None = None,
            description: str = "一个智能助手，可以使用tools的搜集各种信息。",
            system_message: (
                    str | None
            ) = "你是一个智能助手，善于使用工具处理用户需求.",
            model_client_stream: bool = False,
            reflect_on_tool_use: bool | None = None,
            tool_call_summary_format: str = "{result}",
            tool_call_summary_formatter: Callable[[FunctionCall, FunctionExecutionResult], str] | None = None,
            output_content_type: type[BaseModel] | None = None,
            output_content_type_format: str | None = None,
            memory: Sequence[Memory] | None = None,
            metadata: Dict[str, str] | None = None,
    ):
        super().__init__(
            name=name,
            description=description,
            model_client=model_client,
            tools=tools,
            workbench=workbench,
            handoffs=handoffs,
            model_context=model_context,
            system_message=system_message,
            model_client_stream=model_client_stream,
            reflect_on_tool_use=reflect_on_tool_use,
            tool_call_summary_format=tool_call_summary_format,
            tool_call_summary_formatter=tool_call_summary_formatter,
            output_content_type=output_content_type,
            output_content_type_format=output_content_type_format,
            memory=memory,
            metadata=metadata,
        )
        # Placeholder handle for a background speech task. It is only reset
        # (never assigned a task) by async_speech at present; kept so callers
        # can switch to asyncio.create_task-based playback later.
        self.speech_task: Optional[asyncio.Task[None]] = None
        # Default edge-tts voice used when on_messages speaks a response.
        self.voice = "zh-CN-XiaoxiaoNeural"

    async def async_speech(self, message: str, voice: str = "zh-CN-XiaoxiaoNeural") -> None:
        """Synthesize *message* with edge-tts and play it through pygame.

        Runs inline in the current task: the coroutine does not return until
        playback finishes (it polls the mixer and yields via asyncio.sleep,
        so other tasks on the loop are not starved).

        Args:
            message: Text to speak.
            voice: edge-tts voice identifier.
        """
        # 24 kHz mono matches the edge-tts audio stream.
        pygame.mixer.init(frequency=24000, channels=1)
        audio_buffer = BytesIO()
        try:
            # Stream TTS audio chunks into an in-memory buffer.
            communicate = edge_tts.Communicate(message, voice, rate='+25%')
            async for chunk in communicate.stream():
                if chunk["type"] == "audio":
                    audio_buffer.write(chunk["data"])
            # Rewind and hand the buffer to the mixer for playback.
            audio_buffer.seek(0)
            pygame.mixer.music.load(audio_buffer)
            pygame.mixer.music.play()
            # Poll until playback completes, yielding to the event loop.
            while pygame.mixer.music.get_busy():
                await asyncio.sleep(0.1)
        except Exception as e:
            # Best-effort playback: a TTS/audio failure must not break the
            # agent's response flow, so log and continue.
            print(f"语音播放出错: {e}")
        finally:
            # Always release audio resources, even on error/cancellation.
            pygame.mixer.music.stop()
            pygame.mixer.quit()
            audio_buffer.close()
            self.speech_task = None

    async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
        """Handle incoming messages, speaking the final text response aloud.

        Consumes the inherited ``on_messages_stream``. The stream yields
        intermediate agent events/messages and finally a ``Response``; only
        the ``Response`` carries ``chat_message``, so we must check the item
        type before touching that attribute (the previous implementation
        accessed it unconditionally and raised AttributeError on events).
        """
        async for message in self.on_messages_stream(messages, cancellation_token):
            if isinstance(message, Response):
                chat_message = message.chat_message
                if isinstance(chat_message, TextMessage):
                    # Speak the response inline; playback completes before
                    # the response is returned to the caller.
                    await self.async_speech(chat_message.content, self.voice)
                return message
        raise AssertionError("The stream should have returned the final result.")



# 测试代码
async def test_base_assistant_agent():
    """Smoke-test BaseAssistantAgent against a locally hosted Qwen model.

    Requires an Ollama-compatible endpoint listening on 127.0.0.1:11434.
    """
    from autogen_ext.models.openai import OpenAIChatCompletionClient

    # Capabilities advertised for the local GGUF model.
    qwen_model_info = {
        "vision": False,
        "function_calling": True,
        "family": "Qwen3",
        "structured_output": True,
        "json_output": True,
        "multiple_system_messages": True,
    }
    client = OpenAIChatCompletionClient(
        model="modelscope.cn/Qwen/Qwen2.5-7B-Instruct-GGUF:q5_k_m",
        model_info=qwen_model_info,
        api_key="ollama",
        base_url="http://127.0.0.1:11434/v1",
    )
    assistant = BaseAssistantAgent(
        name="assistant",
        model_client=client,
        system_message="You are a helpful AI assistant.",
    )
    outcome = await assistant.run(task="今天天气如何？")
    print(outcome.messages[-1].content)

if __name__ == "__main__":
    # asyncio is already imported at module level (L16); the redundant
    # in-block re-import was removed.
    asyncio.run(test_base_assistant_agent())