import io
import os
import wave

import dashscope
import pyaudio
from dashscope.audio.tts_v2 import *

import config
from common.comm_utils import get_file_name

# Replace with your own DashScope API key.
# NOTE(review): the key is read from config.OPENAI_API_KEY — the name suggests
# an OpenAI credential; confirm it actually holds a DashScope API key.
dashscope.api_key = config.OPENAI_API_KEY
model = "cosyvoice-v1"  # CosyVoice TTS model, also used as target_model for voice cloning
# model='sambert-zhichu-v1'
default_voice_id = "longxiaochun"  # built-in CosyVoice voice used by text2speech
# url = "lucky_web/public/ai_voices/speech.wav"


class Callback(ResultCallback):
    """DashScope TTS streaming callback.

    Plays synthesized PCM audio through PyAudio as it arrives and, when
    ``output_filename`` is set, simultaneously saves it as a WAV file under
    ``lucky_web/public/ai_voices``.
    """

    def __init__(self, param_format: int = pyaudio.paInt16, channels: int = 1,
                 rate: int = 22050, output_filename: str = "speech.wav"):
        """Store audio parameters; actual resources are created in on_open().

        Args:
            param_format: PyAudio sample format (default: 16-bit PCM).
            channels: Channel count (1 = mono).
            rate: Sample rate in Hz; must match the synthesizer's output format.
            output_filename: WAV file name to save to; falsy disables saving.
        """
        self._player = None
        self._stream = None
        self._wave_file = None
        # Audio parameters (mono, 22050 Hz by default — matches
        # AudioFormat.PCM_22050HZ_MONO_16BIT used by text2speech).
        self.format = param_format
        self.channels = channels
        self.rate = rate
        self.output_filename = output_filename

    def on_open(self):
        """Initialize the playback stream and, if configured, the WAV file."""
        print("websocket is open.")
        self._player = pyaudio.PyAudio()

        # Output stream (output=True) for live playback of incoming audio.
        self._stream = self._player.open(
            format=self.format,
            channels=self.channels,
            rate=self.rate,
            output=True,
        )

        # Only create the WAV file when a file name was provided.
        if self.output_filename:
            # Ensure the target directory exists.
            save_dir = "lucky_web/public/ai_voices"
            os.makedirs(save_dir, exist_ok=True)

            # Ensure the file name carries a .wav suffix.
            if not self.output_filename.endswith('.wav'):
                self.output_filename += '.wav'

            file_path = os.path.join(save_dir, self.output_filename)

            # Open the WAV file with parameters matching the playback stream.
            self._wave_file = wave.open(file_path, 'wb')
            self._wave_file.setnchannels(self.channels)
            self._wave_file.setsampwidth(self._player.get_sample_size(self.format))
            self._wave_file.setframerate(self.rate)

    def on_complete(self):
        print("speech synthesis task complete successfully.")

    def on_error(self, message: str):
        print(f"speech synthesis task failed, {message}")

    def on_close(self):
        """Release stream, player, and WAV file; safe to call more than once."""
        print("websocket is closed.")
        # Clear each handle after closing so a repeated on_close is a no-op.
        if self._stream:
            self._stream.stop_stream()
            self._stream.close()
            self._stream = None

        if self._player:
            self._player.terminate()
            self._player = None

        if self._wave_file:
            self._wave_file.close()
            self._wave_file = None

    def on_event(self, message):
        print(f"recv speech synthsis message {message}")

    def on_data(self, audio_data: bytes) -> None:
        """Play one PCM chunk and append it to the WAV file if one is open."""
        # Log the chunk size only — dumping raw audio bytes floods stdout.
        print("开始接收到音频数据:", len(audio_data))

        try:
            # Guard: on_data may in principle arrive before on_open finished.
            if self._stream:
                self._stream.write(audio_data)

            if self._wave_file:
                self._wave_file.writeframes(audio_data)
        except Exception as e:
            print(f"处理音频数据时出错: {e}")

def text2speech(text: str) -> bytes:
    """Synthesize ``text`` with DashScope CosyVoice and return WAV bytes.

    Streams PCM chunks from the service, collects them in memory, and wraps
    them in a WAV container (mono, 16-bit, 22050 Hz — matching the requested
    ``PCM_22050HZ_MONO_16BIT`` output). A copy is also saved to disk via the
    callback's wave file.

    Raises:
        Exception: when the service produced no audio, or synthesis failed.
    """
    audio_chunks = []

    # Derive the on-disk file name from the text via the shared helper.
    file_name = get_file_name(text)
    if not file_name.endswith('.wav'):
        file_name += '.wav'

    class StreamingCallback(Callback):
        """Collects PCM chunks instead of playing them; still saves to disk."""

        def __init__(self):
            super().__init__(output_filename=file_name)  # pass generated file name

        def on_data(self, audio_data: bytes) -> None:
            print("收到音频数据，长度:", len(audio_data))
            audio_chunks.append(audio_data)
            # BUGFIX: also write frames to the WAV file opened in on_open().
            # The previous override dropped this, so the saved file was empty.
            if self._wave_file:
                self._wave_file.writeframes(audio_data)

    callback = StreamingCallback()

    try:
        synthesizer = SpeechSynthesizer(
            model=model,
            voice=default_voice_id,
            format=AudioFormat.PCM_22050HZ_MONO_16BIT,
            callback=callback,
        )

        synthesizer.streaming_call(text)
        synthesizer.streaming_complete()

        if not audio_chunks:
            raise Exception("未生成音频数据")

        # Wrap the raw PCM in a WAV header so callers get a playable byte string.
        wav_buffer = io.BytesIO()
        with wave.open(wav_buffer, 'wb') as wav_file:
            wav_file.setnchannels(1)      # mono
            wav_file.setsampwidth(2)      # 16-bit samples
            wav_file.setframerate(22050)  # sample rate
            wav_file.writeframes(b''.join(audio_chunks))

        return wav_buffer.getvalue()

    except Exception as e:
        print(f"语音合成错误: {str(e)}")
        raise


def get_voice_id(url: str, prefix: str = 'prefix'):
    """Clone a voice from the audio sample at ``url`` via DashScope enrollment.

    Args:
        url: Publicly accessible URL of the reference audio to clone.
        prefix: Prefix for the generated voice id; defaults to the previous
            hard-coded value 'prefix' for backward compatibility.

    Returns:
        The newly created voice id, usable as ``voice`` with the TTS model.
    """
    # Create the voice enrollment service instance.
    service = VoiceEnrollmentService()
    # create_voice clones the voice and returns the generated voice_id.
    voice_id = service.create_voice(target_model=model, prefix=prefix, url=url)
    print("requestId: ", service.get_last_request_id())
    print(f"your voice id is {voice_id}")
    return voice_id