from datetime import datetime
from pathlib import Path
from openai import OpenAI
import io
import os
import yaml
import re
import requests
from pydub import AudioSegment
import tempfile
from config.config import Config
import logging
import emoji

# class Config:
#     # 数据库配置
#     SQLALCHEMY_DATABASE_URI = os.getenv(
#         "DATABASE_URL", "postgresql://user:password@localhost/mindcare"
#     )
#     SQLALCHEMY_TRACK_MODIFICATIONS = False

#     # JWT配置
#     JWT_SECRET_KEY = os.getenv("JWT_SECRET_KEY", "your_jwt_secret_key")
#     JWT_ACCESS_TOKEN_EXPIRES = 900  # 15分钟
#     JWT_REFRESH_TOKEN_EXPIRES = 604800  # 7天

#     # Redis配置
#     REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379/0")

#     # 短信服务配置（以阿里云为例）
#     SMS_ACCESS_KEY_ID = os.getenv("SMS_ACCESS_KEY_ID")
#     SMS_ACCESS_KEY_SECRET = os.getenv("SMS_ACCESS_KEY_SECRET")
#     SMS_SIGN_NAME = "MindCare"
#     SMS_TEMPLATE_CODE = "SMS_123456789"

#     """
#     简单的配置类，将YAML文件内容直接映射为类属性。
#     只处理第一层级，更深的层级将以字典形式存储。
#     """

#     config_path = None

#     @classmethod
#     def load_config(cls, file_path):
#         # if cls.config_path is not None:
#             # return
#         cls.config_path = file_path
#         with open(file_path, 'r', encoding='utf-8') as f:
#             config_dict = yaml.safe_load(f)

#         # 设置类属性
#         for key, value in config_dict.items():
#             setattr(cls, key, value)


# Config.load_config('./config/config.yaml')


class TTSClient:
    """Text-to-speech client for the SiliconFlow (OpenAI-compatible) API.

    Input text is cleaned (emoji and bracketed asides removed), split into
    punctuation-terminated sentences, synthesized sentence by sentence, and
    the resulting segments are concatenated with a configurable pause.
    Custom reference voices are uploaded once per process and cached.
    """

    def __init__(self):
        """Initialize the TTS client from ``Config.tts``.

        Raises:
            ValueError: if ``Config.tts["OPENAI_API_KEY"]`` is empty.
        """
        self.api_key = Config.tts["OPENAI_API_KEY"]
        self.base_url = Config.tts["BASE_URL"]
        # Silence inserted between sentence segments, in milliseconds.
        self.pause_duration = Config.tts["pause_duration"]
        # Directory of custom reference voices: <dir>/<name>/{voice.wav, word.txt}.
        self.voice_source_dir = Config.tts["voice_source_dir"]
        # Cache of uploaded custom voices: source name -> API voice URI.
        self.voice_source_buffer = {}

        if not self.api_key:
            raise ValueError(
                "Missing API key. Set SILICONFLOW_API_KEY environment variable or pass api_key parameter"
            )

        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)

    def generate_speech_helper(
        self,
        text,
        voice="FunAudioLLM/CosyVoice2-0.5B:anna",
        model="FunAudioLLM/CosyVoice2-0.5B",
        response_format="mp3",
        emotion="",
        save_path=None,
    ):
        """Synthesize a single text fragment and return it as an AudioSegment.

        :param text: text fragment to convert
        :param voice: voice preset or uploaded-voice URI
        :param model: TTS model name
        :param response_format: audio format (mp3, wav, pcm, opus)
        :param emotion: optional emotion hint prepended as a prompt
        :param save_path: unused here; kept for interface compatibility
        :return: pydub.AudioSegment containing the synthesized audio
        """
        # Prepend the emotion prompt if one was given.
        if emotion:
            full_text = f"你能用{emotion}的情感说吗？<|endofprompt|>{text}"
        else:
            full_text = text

        # Stream the synthesized audio into an in-memory buffer.
        with self.client.audio.speech.with_streaming_response.create(
            model=model, voice=voice, input=full_text, response_format=response_format
        ) as response:
            audio_stream = io.BytesIO()
            for chunk in response.iter_bytes():
                audio_stream.write(chunk)
            audio_stream.seek(0)

        # pydub decodes most reliably from a real file path, so spill the
        # buffer to a temporary file and remove it afterwards (the original
        # implementation leaked one temp file per fragment).
        with tempfile.NamedTemporaryFile(
            suffix=f".{response_format}", delete=False
        ) as temp_audio:
            temp_audio.write(audio_stream.getvalue())
            temp_file_path = temp_audio.name
        try:
            # from_mp3 / from_wav are thin wrappers over from_file, so one
            # call with an explicit format covers every response_format.
            return AudioSegment.from_file(temp_file_path, format=response_format)
        finally:
            os.remove(temp_file_path)

    def process_and_split_text(self, text):
        """Clean *text* and split it into punctuation-terminated sentences.

        Removes emoji and bracketed asides (Chinese or ASCII brackets), then
        splits on sentence-ending punctuation, keeping each punctuation mark
        attached to the sentence it terminates.

        :param text: raw input text
        :return: list of non-empty sentence strings
        """
        # 1. Strip emoji.
        text = emoji.replace_emoji(text, replace="")

        # 2. Drop brackets and their contents: （）【】《》 and () [] <>.
        text = re.sub(r"[\(\[（【<《].*?[\)\]）】>》]", "", text)

        # 3. Split on punctuation while keeping the delimiters (the capturing
        #    group makes re.split interleave text and punctuation).
        sentence_endings = r"([。！？!?；;.,，：:])"
        parts = re.split(sentence_endings, text)

        # Re-attach each sentence to its trailing punctuation mark.
        sentences = []
        for i in range(0, len(parts) - 1, 2):
            sentence = parts[i].strip()
            punct = parts[i + 1]
            if sentence:
                sentences.append(sentence + punct)

        # Keep any trailing text that had no closing punctuation.
        if len(parts) % 2 == 1:
            last = parts[-1].strip()
            if last:
                sentences.append(last)

        return sentences

    def _upload_voice_source(self, voice_source):
        """Upload a local reference voice to the API and return its URI.

        Expects ``<voice_source_dir>/<voice_source>/voice.wav`` (reference
        audio) and ``.../word.txt`` (its transcript).

        :param voice_source: name of the custom voice folder
        :return: voice URI assigned by the API
        :raises RuntimeError: if the upload request is rejected
        """
        voice_source_path = os.path.join(self.voice_source_dir, voice_source)
        voice_file = os.path.join(voice_source_path, "voice.wav")
        word_file = os.path.join(voice_source_path, "word.txt")

        # Transcript of the reference audio.
        with open(word_file, "r", encoding="utf-8") as f:
            text_content = f.read().strip()

        url = "https://api.siliconflow.cn/v1/uploads/audio/voice"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        data = {
            "model": "FunAudioLLM/CosyVoice2-0.5B",  # model name
            "customName": voice_source,  # reference-voice name
            "text": text_content,  # transcript of the reference audio
        }

        # Open the reference audio in a context manager so the handle is
        # closed after the request (the original leaked it).
        with open(voice_file, "rb") as audio_file:
            response = requests.post(
                url, headers=headers, files={"file": audio_file}, data=data
            )

        if response.status_code != 200:
            logging.error(
                f"上传参考音频失败: {response.status_code} - {response.text}"
            )
            # The original logged the failure and still fell through to
            # response.json(), crashing with an opaque error; fail explicitly.
            raise RuntimeError(
                f"Voice upload failed: {response.status_code} - {response.text}"
            )
        return response.json()["uri"]

    def generate_speech(
        self,
        text,
        voice_source,
        model="FunAudioLLM/CosyVoice2-0.5B",
        response_format="mp3",
        emotion="",
        save_path=None,
    ):
        """Synthesize *text* with the given voice and return the full audio.

        :param text: text to synthesize (emoji/brackets are stripped first)
        :param voice_source: preset name listed in ``Config.tts["voice_source"]``
            or the name of a custom voice folder under ``voice_source_dir``
        :param model: TTS model name
        :param response_format: audio format (mp3, wav, ...)
        :param emotion: optional emotion hint
        :param save_path: optional path to also write the encoded audio to disk
        :return: tuple of (combined AudioSegment, BytesIO with encoded audio)
        :raises ValueError: if no audio segments could be generated
        """
        processed_text = self.process_and_split_text(text)

        # Resolve the voice: preset, cached custom voice, or fresh upload.
        if voice_source in Config.tts["voice_source"]:
            voice = model + f":{voice_source}"
        elif voice_source in self.voice_source_buffer:
            # Bug fix: the original never read the cache back, so a cached
            # custom voice left voice=None on every call after the first.
            voice = self.voice_source_buffer[voice_source]
        else:
            voice = self._upload_voice_source(voice_source)
            self.voice_source_buffer[voice_source] = voice

        # 1-2. Synthesize each non-empty sentence independently.
        audio_segments = []
        for segment in processed_text:
            if segment:  # skip empty fragments
                audio_seg = self.generate_speech_helper(
                    segment,
                    voice,
                    model,
                    response_format,
                    emotion=emotion,
                    save_path=save_path,
                )
                audio_segments.append(audio_seg)

        if not audio_segments:
            raise ValueError("没有可用的音频片段生成")

        # 3. Concatenate, inserting a pause between segments (not after the last).
        combined_audio = AudioSegment.empty()
        for i, seg in enumerate(audio_segments):
            combined_audio += seg
            if i < len(audio_segments) - 1 and self.pause_duration > 0:
                combined_audio += AudioSegment.silent(duration=self.pause_duration)

        # 4. Encode to an in-memory byte stream.
        output_bytes = io.BytesIO()
        combined_audio.export(output_bytes, format=response_format)
        output_bytes.seek(0)

        # 5. Optionally persist to disk.
        if save_path:
            Path(save_path).parent.mkdir(parents=True, exist_ok=True)
            with open(save_path, "wb") as f:
                f.write(output_bytes.getvalue())
            output_bytes.seek(0)  # rewind for the caller

        return combined_audio, output_bytes

    def save_audio(self, audio_stream, save_path):
        """Write an audio byte stream to *save_path*, creating parent dirs.

        :param audio_stream: BytesIO (or similar buffer) to persist
        :param save_path: destination file path
        """
        # Ensure the destination directory exists.
        Path(save_path).parent.mkdir(parents=True, exist_ok=True)

        with open(save_path, "wb") as f:
            f.write(audio_stream.getbuffer())

        # Rewind so the caller can reuse the stream.
        audio_stream.seek(0)


if __name__ == "__main__":
    # Smoke test: synthesize a short mixed-content sentence (punctuation,
    # a bracketed aside, an emoji) with the "jianli" custom voice and
    # write the result to ./data/test.mp3.
    client = TTSClient()
    client.generate_speech(
        "生成语音并返回字节流.(sile)😊没关系天天在家",
        voice_source="jianli",
        save_path="./data/test.mp3",
    )
