"""实时语音合成模块 - 通义千问 TTS Realtime"""
import os
import base64
import threading
import asyncio
from typing import Optional, Callable
import dashscope
from dashscope.audio.qwen_tts_realtime import (
    QwenTtsRealtime,
    QwenTtsRealtimeCallback,
    AudioFormat
)


class StreamingTTSCallback(QwenTtsRealtimeCallback):
    """Streaming TTS event handler.

    Forwards base64 audio chunks to an optional callback and signals
    end-of-session (or error/close) through a threading.Event.
    """

    def __init__(self, audio_callback: Optional[Callable] = None):
        """Create the handler.

        Args:
            audio_callback: called with each base64-encoded audio delta
                received from the server; may be None to discard audio.
        """
        self.audio_callback = audio_callback
        # Set once the session finishes, errors out, or the socket closes.
        self.complete_event = threading.Event()
        self.session_id = None
        self.error = None

    def on_open(self) -> None:
        """Invoked when the WebSocket connection is established."""
        print('[TTS] WebSocket连接已建立')

    def on_close(self, close_status_code, close_msg) -> None:
        """Invoked when the connection closes; unblocks any waiter."""
        print(f'[TTS] WebSocket连接已关闭: code={close_status_code}, msg={close_msg}')
        self.complete_event.set()

    def on_event(self, response: dict) -> None:
        """Dispatch one server event by its 'type' field.

        Unknown event types are ignored. Any exception while handling an
        event is recorded in `self.error` and ends the wait.
        """
        try:
            kind = response.get('type')

            if kind == 'session.created':
                self.session_id = response['session']['id']
                print(f'[TTS] 会话已创建: {self.session_id}')
            elif kind == 'session.updated':
                print('[TTS] 会话配置已更新')
            elif kind == 'response.audio.delta':
                # Incremental audio payload (base64 string).
                chunk = response.get('delta')
                if chunk and self.audio_callback:
                    self.audio_callback(chunk)
            elif kind == 'response.done':
                print('[TTS] 响应完成')
            elif kind == 'session.finished':
                print('[TTS] 会话结束')
                self.complete_event.set()
            elif kind == 'error':
                detail = response.get('error', {}).get('message', 'Unknown error')
                print(f'[TTS] 错误: {detail}')
                self.error = detail
                self.complete_event.set()

        except Exception as exc:
            print(f'[TTS] 处理事件时出错: {exc}')
            self.error = str(exc)
            self.complete_event.set()

    def wait_for_finished(self, timeout: Optional[float] = None) -> bool:
        """Block until the session is over.

        Args:
            timeout: seconds to wait; None waits indefinitely.

        Returns:
            True when the event was set before the timeout. NOTE: the
            event is also set on error/close, so check `self.error`
            to distinguish success from failure.
        """
        return self.complete_event.wait(timeout=timeout)

class RealtimeTTS:
    """Realtime speech-synthesis client over the DashScope WebSocket API."""

    def __init__(
            self,
            api_key: Optional[str] = None,
            model: str = 'qwen3-tts-flash-realtime',
            voice: str = 'Cherry',
            language_type: str = 'Chinese',
            sample_rate: int = 24000,
            format: str = 'pcm',
            region: str = 'beijing'  # 'beijing' or 'singapore'
    ):
        """Configure the client.

        Args:
            api_key: DashScope API key; falls back to the
                DASHSCOPE_API_KEY environment variable when None.
            model: model name.
            voice: voice preset name.
            language_type: language of the synthesized speech.
            sample_rate: output sample rate in Hz (only 24000 supported).
            format: audio container format (currently always PCM).
            region: service region, selects the endpoint URL.

        Raises:
            ValueError: when no API key is available.
        """
        # Resolve the API key: explicit argument wins, then environment.
        if api_key:
            dashscope.api_key = api_key
        else:
            try:
                dashscope.api_key = os.environ['DASHSCOPE_API_KEY']
            except KeyError:
                raise ValueError('未设置DASHSCOPE_API_KEY') from None

        # Pick the regional WebSocket endpoint.
        self.url = (
            'wss://dashscope-intl.aliyuncs.com/api-ws/v1/realtime'
            if region == 'singapore'
            else 'wss://dashscope.aliyuncs.com/api-ws/v1/realtime'
        )

        self.model = model
        self.voice = voice
        self.language_type = language_type
        self.sample_rate = sample_rate
        self.format = format

        self.client: Optional[QwenTtsRealtime] = None
        self.callback: Optional[StreamingTTSCallback] = None

    def connect(self, audio_callback: Optional[Callable] = None) -> None:
        """Open the WebSocket connection and configure the session.

        Args:
            audio_callback: receives each base64-encoded audio chunk.
        """
        self.callback = StreamingTTSCallback(audio_callback=audio_callback)
        self.client = QwenTtsRealtime(
            model=self.model,
            callback=self.callback,
            url=self.url
        )

        self.client.connect()

        # Apply session parameters; server_commit lets the server decide
        # sentence boundaries on its own.
        self.client.update_session(
            voice=self.voice,
            language_type=self.language_type,
            response_format=self._get_audio_format(),
            mode='server_commit'
        )

    def _get_audio_format(self) -> AudioFormat:
        """Map sample rate/format onto the AudioFormat enum.

        Only 24000 Hz mono 16-bit PCM is supported; anything else falls
        back to that with a warning.
        """
        if self.sample_rate != 24000:
            print(f'[TTS] 警告: 不支持的采样率 {self.sample_rate}Hz,使用默认 24000Hz')
        return AudioFormat.PCM_24000HZ_MONO_16BIT

    def synthesize_text(self, text: str) -> None:
        """Append *text* to the server-side synthesis buffer.

        Args:
            text: text to synthesize.

        Raises:
            RuntimeError: when connect() has not been called yet.
        """
        if self.client is None:
            raise RuntimeError('未建立连接,请先调用connect()')
        self.client.append_text(text)

    def finish(self) -> None:
        """Tell the server no further text will be sent."""
        if self.client:
            self.client.finish()

    def close(self) -> None:
        """Tear down the WebSocket connection."""
        if self.client:
            self.client.close()

    def wait_for_completion(self, timeout: Optional[float] = None) -> bool:
        """Wait until synthesis finishes.

        Args:
            timeout: seconds to wait; None waits indefinitely.

        Returns:
            True when the session signaled completion before the timeout;
            False when not connected or the wait timed out.
        """
        return self.callback.wait_for_finished(timeout=timeout) if self.callback else False

    def get_session_id(self) -> Optional[str]:
        """Return the server-assigned session id, if one was created."""
        return self.callback.session_id if self.callback else None

    def get_error(self) -> Optional[str]:
        """Return the last recorded error message, or None."""
        return self.callback.error if self.callback else None


async def synthesize_stream_async(
        text_stream,
        audio_callback: Callable,
        api_key: Optional[str] = None,
        voice: str = 'Cherry',
        language_type: str = 'Chinese',
        sample_rate: int = 24000
) -> None:
    """Asynchronously synthesize a stream of text into speech.

    Args:
        text_stream: async iterable yielding text chunks.
        audio_callback: receives each base64-encoded audio chunk.
        api_key: DashScope API key (None -> environment variable).
        voice: voice preset name.
        language_type: language of the synthesized speech.
        sample_rate: output sample rate in Hz.
    """
    tts = RealtimeTTS(
        api_key=api_key,
        voice=voice,
        language_type=language_type,
        sample_rate=sample_rate
    )

    try:
        # Open the connection and register the audio sink.
        tts.connect(audio_callback=audio_callback)

        # Forward text chunks as they arrive, skipping blank ones.
        async for text_chunk in text_stream:
            if text_chunk and text_chunk.strip():
                tts.synthesize_text(text_chunk)
                # Small pacing delay to avoid flooding the server.
                await asyncio.sleep(0.05)

        # Signal end of input.
        tts.finish()

        # wait_for_completion blocks on a threading.Event, so run it in a
        # worker thread. get_running_loop() is the correct API inside a
        # coroutine (get_event_loop() is deprecated here since 3.10).
        loop = asyncio.get_running_loop()
        completed = await loop.run_in_executor(
            None,
            tts.wait_for_completion,
            30.0  # 30-second timeout
        )
        if not completed:
            # Previously the timeout result was silently discarded.
            print('[TTS] 合成超时')

        # Surface any error recorded by the callback.
        error = tts.get_error()
        if error:
            print(f'[TTS] 合成出错: {error}')

    finally:
        tts.close()


def synthesize_text_to_file(
        text: str,
        output_file: str,
        api_key: Optional[str] = None,
        voice: str = 'Cherry',
        language_type: str = 'Chinese',
        sample_rate: int = 24000
) -> bool:
    """Synthesize *text* and save the raw audio to a file.

    Args:
        text: text to synthesize.
        output_file: destination path for the raw PCM audio.
        api_key: DashScope API key (None -> environment variable).
        voice: voice preset name.
        language_type: language of the synthesized speech.
        sample_rate: output sample rate in Hz.

    Returns:
        True on success, False on timeout or error.
    """
    chunks: list = []

    def collect(audio_b64: str):
        """Decode and accumulate one base64 audio chunk."""
        chunks.append(base64.b64decode(audio_b64))

    tts = RealtimeTTS(
        api_key=api_key,
        voice=voice,
        language_type=language_type,
        sample_rate=sample_rate
    )

    try:
        tts.connect(audio_callback=collect)
        tts.synthesize_text(text)
        tts.finish()

        # Block until the session ends (30-second cap).
        if not tts.wait_for_completion(timeout=30.0):
            print('[TTS] 合成超时或失败')
            return False

        err = tts.get_error()
        if err:
            print(f'[TTS] 合成出错: {err}')
            return False

        # Persist the concatenated audio in one write.
        with open(output_file, 'wb') as f:
            f.write(b''.join(chunks))

        print(f'[TTS] 音频已保存到: {output_file}')
        return True

    finally:
        tts.close()


# 示例用法
if __name__ == '__main__':
    # Smoke test: synthesize a fixed sentence into a raw PCM file.
    demo_text = '你好,我是通义千问的实时语音合成服务。这是一段测试音频。'

    ok = synthesize_text_to_file(
        text=demo_text,
        output_file='test_output.pcm',
        voice='Cherry',
        language_type='Chinese',
        sample_rate=24000
    )

    print('✅ 测试成功!' if ok else '❌ 测试失败!')
