from datetime import datetime
from pathlib import Path
from openai import OpenAI
import io
import os
import yaml
import re
import requests
from pydub import AudioSegment

# AudioSegment.converter = r"D:\ffmpeg-7.1.1-essentials_build\bin\ffmpeg.exe"
# AudioSegment.ffprobe = r"D:\ffmpeg-7.1.1-essentials_build\bin\ffprobe.exe"
import tempfile
from config.config import Config
import logging
import emoji


class TTSClient:
    """Text-to-speech client for the SiliconFlow (OpenAI-compatible) speech API.

    The client splits input text into sentences, synthesizes each sentence
    separately, and concatenates the audio with a configurable pause between
    sentences. Both built-in voice presets and custom voices cloned from local
    reference recordings are supported; custom voices are uploaded once and
    their remote URIs cached for reuse.
    """

    def __init__(self):
        """Initialize the client from ``Config.tts``.

        Raises:
            ValueError: if no API key is configured.
        """
        self.api_key = Config.tts["OPENAI_API_KEY"]
        self.base_url = Config.tts["BASE_URL"]
        # Silence inserted between sentences, in milliseconds.
        self.pause_duration = Config.tts["pause_duration"]
        # Directory holding custom voice folders (each with voice.wav + word.txt).
        self.voice_source_dir = Config.tts["voice_source_dir"]
        # Cache of uploaded custom voices: source name -> remote voice URI.
        self.voice_source_buffer = {}

        if not self.api_key:
            raise ValueError(
                "Missing API key. Set SILICONFLOW_API_KEY environment variable or pass api_key parameter"
            )

        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)

    def generate_speech_helper(
        self,
        text,
        voice="FunAudioLLM/CosyVoice2-0.5B:anna",
        model="FunAudioLLM/CosyVoice2-0.5B",
        response_format="mp3",
        emotion="",
        save_path=None,
    ):
        """Synthesize one piece of text and return it as a pydub AudioSegment.

        :param text: text to synthesize
        :param voice: voice preset name or uploaded custom-voice URI
        :param model: TTS model name
        :param response_format: audio format (mp3, wav, pcm, opus)
        :param emotion: optional emotion hint, injected via the CosyVoice
            prompt syntax
        :param save_path: unused here; kept for backward compatibility
        :return: AudioSegment containing the synthesized audio
        """
        # Prepend the emotion instruction using the CosyVoice prompt syntax.
        if emotion:
            full_text = f"你能用{emotion}的情感说吗？<|endofprompt|>{text}"
        else:
            full_text = text

        # Stream the API response into memory.
        with self.client.audio.speech.with_streaming_response.create(
            model=model, voice=voice, input=full_text, response_format=response_format
        ) as response:
            audio_stream = io.BytesIO()
            for chunk in response.iter_bytes():
                audio_stream.write(chunk)
        audio_stream.seek(0)

        # pydub decodes via ffmpeg from a file path, so spill the bytes to a
        # temp file; ensure it is deleted afterwards (the original version
        # leaked one temp file per sentence).
        temp_file_path = None
        try:
            with tempfile.NamedTemporaryFile(
                suffix=f".{response_format}", delete=False
            ) as temp_audio:
                temp_audio.write(audio_stream.getvalue())
                temp_file_path = temp_audio.name
            # from_file(..., format=...) covers mp3/wav and every other format.
            return AudioSegment.from_file(temp_file_path, format=response_format)
        finally:
            if temp_file_path and os.path.exists(temp_file_path):
                os.remove(temp_file_path)

    def process_and_split_text(self, text):
        """Normalize *text* and split it into sentences for synthesis.

        Removes emoji and bracketed asides, then splits on Chinese/English
        punctuation, keeping each punctuation mark attached to its sentence.

        :return: list of non-empty sentence strings; falls back to the whole
            stripped text when no punctuation was found, or [] for empty input
        """
        # 1. Strip emoji.
        text = emoji.replace_emoji(text, replace="")

        # 2. Drop bracketed content: （）【】《》 and () [] <>.
        text = re.sub(r"[\(\[（【<《].*?[\)\]）】>》]", "", text)

        # 3. Split on sentence-ending punctuation, keeping the delimiter
        #    (capturing group makes re.split alternate text/punctuation).
        sentence_endings = r"([。！？!?；;.,，：:])"
        parts = re.split(sentence_endings, text)

        # Re-attach each sentence to its trailing punctuation mark.
        sentences = []
        for i in range(0, len(parts) - 1, 2):
            sentence = parts[i].strip()
            punct = parts[i + 1]
            if sentence:
                sentences.append(sentence + punct)

        # Keep any trailing text that has no closing punctuation.
        if len(parts) % 2 == 1:
            last = parts[-1].strip()
            if last:
                sentences.append(last)

        # Drop empty fragments.
        sentences = [s.strip() for s in sentences if s.strip()]

        # Fall back to the original text when splitting produced nothing.
        if not sentences:
            return [text.strip()] if text.strip() else []

        return sentences

    def generate_speech(
        self,
        text,
        voice_source,
        model="FunAudioLLM/CosyVoice2-0.5B",
        response_format="mp3",
        emotion="",
        save_path=None,
    ):
        """Synthesize *text* with the given voice and return the joined audio.

        :param text: text to synthesize (split into sentences internally)
        :param voice_source: preset name listed in ``Config.tts["voice_source"]``,
            or the name of a custom voice folder under ``voice_source_dir``
        :param model: TTS model name
        :param response_format: audio format (mp3, wav, pcm, opus)
        :param emotion: optional emotion hint
        :param save_path: optional path to also write the encoded audio to disk
        :return: tuple of (combined AudioSegment, BytesIO with encoded audio)
        :raises ValueError: if no audio segments could be generated, or the
            custom voice files are missing
        :raises RuntimeError: if uploading a custom reference voice fails
        """
        processed_text = self.process_and_split_text(text)

        # Resolve the voice via the shared helper. The original inline code
        # left ``voice`` as None when the custom voice was already cached,
        # leaked the reference-audio file handle, and only logged (then
        # ignored) upload failures before dereferencing response.json().
        voice = self._get_voice_resource(voice_source, model)

        # 1. Synthesize each sentence.
        audio_segments = []
        for segment in processed_text:
            if segment:  # skip empty fragments
                audio_seg = self.generate_speech_helper(
                    segment,
                    voice,
                    model,
                    response_format,
                    emotion=emotion,
                    save_path=save_path,
                )
                audio_segments.append(audio_seg)

        if not audio_segments:
            raise ValueError("没有可用的音频片段生成")

        # 2. Concatenate, inserting a short pause between sentences
        #    (but not after the last one).
        combined_audio = AudioSegment.empty()
        for i, seg in enumerate(audio_segments):
            combined_audio += seg
            if i < len(audio_segments) - 1 and self.pause_duration > 0:
                combined_audio += AudioSegment.silent(duration=self.pause_duration)

        # 3. Encode to the requested format in memory.
        output_bytes = io.BytesIO()
        combined_audio.export(output_bytes, format=response_format)
        output_bytes.seek(0)

        # 4. Optionally persist to disk.
        if save_path:
            Path(save_path).parent.mkdir(parents=True, exist_ok=True)
            with open(save_path, "wb") as f:
                f.write(output_bytes.getvalue())
            output_bytes.seek(0)  # rewind so the caller can re-read

        return combined_audio, output_bytes

    def save_audio(self, audio_stream, save_path):
        """Write *audio_stream* to *save_path*, creating parent directories.

        The stream position is rewound afterwards so it can be re-read.

        :param audio_stream: BytesIO holding encoded audio
        :param save_path: destination file path
        """
        Path(save_path).parent.mkdir(parents=True, exist_ok=True)

        with open(save_path, "wb") as f:
            f.write(audio_stream.getbuffer())

        audio_stream.seek(0)

    # ------------------------------------------------------------------
    # Streaming synthesis (added later; see generate_speech_stream)
    # ------------------------------------------------------------------

    def generate_speech_stream(
        self,
        text,
        voice_source,
        model="FunAudioLLM/CosyVoice2-0.5B",
        response_format="mp3",
        emotion="",
        chunk_size=4096,  # size of each yielded chunk, in bytes
    ):
        """Stream synthesized speech as fixed-size byte chunks.

        NOTE(review): sentence audio and inter-sentence silence are joined as
        already-encoded bytes. This plays correctly for frame-based formats
        such as mp3, but for container formats such as wav it embeds extra
        headers mid-stream — confirm before using non-mp3 formats here.

        :param text: text to synthesize
        :param voice_source: voice preset name or custom voice folder name
        :param model: TTS model name
        :param response_format: audio format (mp3, wav, pcm, opus)
        :param emotion: optional emotion hint
        :param chunk_size: size in bytes of each yielded chunk
        :return: generator of byte chunks (a single b'' when text is empty)
        """
        sentences = self.process_and_split_text(text)
        if not sentences:
            yield b''  # nothing to synthesize
            return

        # Resolve (or upload and cache) the voice resource.
        voice = self._get_voice_resource(voice_source, model)

        # Assemble all encoded audio in memory first.
        audio_buffer = io.BytesIO()

        for i, sentence in enumerate(sentences):
            if not sentence.strip():
                continue

            # Append the raw encoded bytes of this sentence as they arrive.
            for data in self._generate_sentence_stream(
                sentence,
                voice,
                model,
                response_format,
                emotion,
            ):
                audio_buffer.write(data)

            # Encoded silence between sentences (not after the last one).
            if i < len(sentences) - 1 and self.pause_duration > 0:
                silence = AudioSegment.silent(duration=self.pause_duration)
                silence_buffer = io.BytesIO()
                silence.export(silence_buffer, format=response_format)
                audio_buffer.write(silence_buffer.getvalue())

        # Re-read the assembled audio in chunk_size pieces.
        audio_buffer.seek(0)
        while True:
            chunk = audio_buffer.read(chunk_size)
            if not chunk:
                break
            yield chunk

    def _get_voice_resource(self, voice_source, model):
        """Resolve *voice_source* to a voice identifier usable by the API.

        Built-in presets resolve to ``"<model>:<preset>"``. Custom sources are
        uploaded once (reference audio + transcript) to the SiliconFlow voice
        endpoint and the returned URI is cached in ``self.voice_source_buffer``.

        :raises ValueError: if the custom voice files are missing
        :raises RuntimeError: if the upload request fails
        """
        if voice_source in Config.tts["voice_source"]:
            return model + f":{voice_source}"

        if voice_source not in self.voice_source_buffer:
            voice_file = os.path.join(self.voice_source_dir, voice_source, "voice.wav")
            word_file = os.path.join(self.voice_source_dir, voice_source, "word.txt")

            if not os.path.exists(voice_file) or not os.path.exists(word_file):
                raise ValueError(f"Voice source {voice_source} not found")

            # Upload the reference audio together with its transcript.
            url = "https://api.siliconflow.cn/v1/uploads/audio/voice"
            headers = {"Authorization": f"Bearer {self.api_key}"}

            # Transcript of the reference recording.
            with open(word_file, "r", encoding="utf-8") as txt:
                text_content = txt.read().strip()

            with open(voice_file, "rb") as f:
                files = {"file": f}
                data = {
                    "model": model,
                    "customName": voice_source,
                    "text": text_content,
                }
                response = requests.post(url, headers=headers, files=files, data=data)

            if response.status_code != 200:
                raise RuntimeError(f"Failed to upload voice: {response.text}")

            self.voice_source_buffer[voice_source] = response.json()["uri"]

        return self.voice_source_buffer[voice_source]

    def _generate_sentence_stream(
        self,
        sentence,
        voice,
        model,
        response_format,
        emotion,
    ):
        """Yield raw encoded audio bytes for one sentence as they arrive."""
        # Prepend the emotion instruction using the CosyVoice prompt syntax.
        if emotion:
            full_text = f"你能用{emotion}的情感说吗？<|endofprompt|>{sentence}"
        else:
            full_text = sentence

        with self.client.audio.speech.with_streaming_response.create(
            model=model,
            voice=voice,
            input=full_text,
            response_format=response_format,
        ) as response:
            for chunk in response.iter_bytes():
                yield chunk

if __name__ == "__main__":
    # Manual smoke test: synthesize one sentence with the "jianli" custom
    # voice and write the result under ./data/.
    tts_model = TTSClient()
    sample_text = "我今天考试没及格，所以我现在很不舒服"
    tts_model.generate_speech(
        sample_text,
        voice_source="jianli",
        save_path="./data/test.mp3",
    )
