import os
import sys
from pathlib import Path

# ---------------------------------------------------------------------------
# sys.path bootstrapping: make the bundled `tools` and `GPT_SoVITS` packages
# importable whether the project lives under the current working directory or
# next to this file.  (Previously this logic was duplicated three times.)
# ---------------------------------------------------------------------------
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/voice_v2pro1/GPT_SoVITS" % (now_dir))

# Directory containing this file — anchor for all file-relative paths below.
current_dir = Path(__file__).parent.resolve()

# Deployments that nest the code under a `voice_v2pro1` sub-directory.
project_root = current_dir / "voice_v2pro1"
sys.path.append(str(project_root))
sys.path.append(str(project_root / "GPT_SoVITS"))

# This file's own directory, so sibling `tools` and `GPT_SoVITS` resolve.
if str(current_dir) not in sys.path:
    sys.path.append(str(current_dir))

# Inserted near the front (index 3) so it wins over the appends above.
# NOTE(review): the original comment claimed insert(0, ...) but the code used
# insert(3, ...); index 3 is kept to preserve the existing lookup order.
gpt_sovits_path = current_dir / "GPT_SoVITS"
if str(gpt_sovits_path) not in sys.path:
    sys.path.insert(3, str(gpt_sovits_path))


import argparse
import subprocess
import numpy as np
import soundfile as sf
from io import BytesIO
# NOTE(review): this file is executed directly (see the __main__ guard at the
# bottom), and package-relative imports ("from .tools ...") raise
# "attempted relative import with no known parent package" in that case.
# The sys.path setup above exists precisely so the absolute forms resolve.
from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names

# Module-level singletons: i18n helper and the list of valid text-split names.
i18n = I18nAuto()
cut_method_names = get_cut_method_names()

# Command-line entry: only one option, the path to the tts_infer YAML config.
parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument(
    "-c",
    "--tts_config",
    type=str,
    default="model/voice_v2pro1/GPT_SoVITS/configs/tts_infer.yaml",
    help="tts_infer路径",
)
args = parser.parse_args()
config_path = args.tts_config
# Fall back to the in-repo config when the caller explicitly passes an
# empty value (e.g. `-c ""`).
if not config_path:
    config_path = "GPT-SoVITS-v2pro/GPT-SoVITS/configs/tts_infer.yaml"

def pack_ogg(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Encode mono PCM `data` at `rate` Hz as OGG into `io_buffer`; return the buffer."""
    ogg_writer = sf.SoundFile(io_buffer, mode="w", samplerate=rate, channels=1, format="ogg")
    with ogg_writer:
        ogg_writer.write(data)
    return io_buffer


def pack_raw(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Append the raw PCM bytes of `data` to `io_buffer`; return the buffer.

    `rate` is unused here — kept so all pack_* helpers share one signature.
    """
    pcm_bytes = data.tobytes()
    io_buffer.write(pcm_bytes)
    return io_buffer


def pack_wav(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Encode `data` at `rate` Hz as WAV into `io_buffer`; return the buffer.

    Bug fix: the original shadowed the `io_buffer` parameter with a fresh
    BytesIO, silently discarding the caller's buffer — inconsistent with the
    other pack_* helpers, which all write into the buffer they are given.
    """
    sf.write(io_buffer, data, rate, format="wav")
    return io_buffer


def pack_aac(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Encode PCM `data` to an ADTS/AAC stream via an external ffmpeg process.

    The encoded bytes are appended to `io_buffer`, which is returned.
    NOTE(review): assumes `data` is int16 PCM — ffmpeg is told "s16le";
    confirm the upstream dtype.
    """
    ffmpeg_cmd = [
        "ffmpeg",
        "-f", "s16le",      # input: signed 16-bit little-endian PCM
        "-ar", str(rate),   # input sample rate
        "-ac", "1",         # mono
        "-i", "pipe:0",     # read PCM from stdin
        "-c:a", "aac",      # encode with the AAC codec
        "-b:a", "192k",     # audio bitrate
        "-vn",              # no video stream
        "-f", "adts",       # raw AAC stream container
        "pipe:1",           # write encoded bytes to stdout
    ]
    # capture_output swallows stderr just as the original Popen pipes did.
    result = subprocess.run(ffmpeg_cmd, input=data.tobytes(), capture_output=True)
    io_buffer.write(result.stdout)
    return io_buffer


def pack_audio(io_buffer: BytesIO, data: np.ndarray, rate: int, media_type: str):
    """Pack `data` into `io_buffer` in the requested format and rewind it.

    `media_type` selects "ogg", "aac" or "wav"; any other value falls back
    to raw PCM bytes.  Returns the buffer with its cursor at position 0.
    """
    packers = {"ogg": pack_ogg, "aac": pack_aac, "wav": pack_wav}
    packer = packers.get(media_type, pack_raw)
    io_buffer = packer(io_buffer, data, rate)
    io_buffer.seek(0)
    return io_buffer



class InitTTS:
    """Loads the GPT-SoVITS TTS pipeline once and exposes a synthesis entry point."""

    def __init__(self,
                 t2s_weights_path: str = "model/voice_v2pro1/GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
                 vits_weights_path: str = "model/voice_v2pro1/GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
                 bert_base_path: str = "model/voice_v2pro1/GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
                 cnhuhbert_base_path: str = "model/voice_v2pro1/GPT_SoVITS/pretrained_models/chinese-hubert-base",
                 ):
        """
        Build the TTS pipeline from the module-level `config_path` config,
        overriding the four model paths with the arguments below.

        Args:
            t2s_weights_path: path to the t2s (GPT) weights
            vits_weights_path: path to the vits (SoVITS) weights
            bert_base_path: path to the BERT model directory
            cnhuhbert_base_path: path to the chinese-hubert model directory
        """
        # (Removed: unused base_dir / pretrained_models_dir locals and the
        # commented-out CUDA branch that duplicated the assignments below.)
        tts_config = TTS_Config(config_path)
        tts_config.vits_weights_path = vits_weights_path
        tts_config.t2s_weights_path = t2s_weights_path
        tts_config.bert_base_path = bert_base_path
        tts_config.cnhuhbert_base_path = cnhuhbert_base_path
        print(tts_config)
        self.tts_pipeline = TTS(tts_config)

    def tts_handle(
            self,
            text: str,
            text_lang: str,
            ref_audio_path: str,
            prompt_text: str,
            prompt_lang: str,
            text_split_method: str,
            save_path: str,
            top_k: int = 5,
            top_p: float = 1,
            temperature: float = 1,
            batch_size: int = 1,
            batch_threshold: float = 0.75,
            split_bucket: bool = True,
            speed_factor: float = 1.0,
            fragment_interval: float = 0.3,
            seed: int = -1,
            media_type: str = "wav",
            parallel_infer: bool = True,
            repetition_penalty: float = 1.35,
            sample_steps: int = 32,
            super_sampling: bool = False

    ):
        """
        Text-to-speech entry point: builds the request dict and starts the
        pipeline, returning its (lazy) generator of synthesized audio.

        Args:
            text: text to be synthesized (required)
            text_lang: language of the text (required)
            ref_audio_path: reference audio path (required)
            prompt_text: prompt text for the reference audio (optional)
            prompt_lang: language of the prompt text (required)
            text_split_method: split method name, e.g. "cut5"
                (see text_segmentation_method.py for the full list)
            save_path: intended output audio path.
                NOTE(review): currently accepted but NOT used — the caller
                must consume the returned generator and write the audio.
            top_k / top_p / temperature: sampling parameters
            batch_size: inference batch size
            batch_threshold: threshold for batch splitting
            split_bucket: whether to split the batch into multiple buckets
            speed_factor: playback speed of the synthesized audio
            fragment_interval: interval between audio fragments
            seed: random seed for reproducibility (-1 = random)
            media_type: output media type: "wav", "raw", "ogg" or "aac"
            parallel_infer: whether to use parallel inference
            repetition_penalty: repetition penalty for the T2S model
            sample_steps: sampling steps for the VITS V3 model
            super_sampling: whether to super-sample audio (VITS V3)

        Returns:
            The generator produced by `self.tts_pipeline.run(req)`.
        """
        # (Removed: a no-op `media_type = media_type if media_type != "wav"
        # else "wav"` line and an unused `audio_data_list` local.)
        req = {
            "text": text,
            "text_lang": text_lang,
            "ref_audio_path": ref_audio_path,
            "aux_ref_audio_paths": [],
            "prompt_text": prompt_text,
            "prompt_lang": prompt_lang,
            "text_split_method": text_split_method,
            "top_k": top_k,
            "top_p": top_p,
            "temperature": temperature,
            "batch_size": batch_size,
            "batch_threshold": batch_threshold,
            "split_bucket": split_bucket,
            "speed_factor": speed_factor,
            "fragment_interval": fragment_interval,
            "seed": seed,
            "media_type": media_type,
            "parallel_infer": parallel_infer,
            "repetition_penalty": repetition_penalty,
            "sample_steps": sample_steps,
            "super_sampling": super_sampling,
        }
        return self.tts_pipeline.run(req)


if __name__ == "__main__":
    tts = InitTTS(t2s_weights_path =r'checkpoints\tts\0718-e10.ckpt',vits_weights_path=r'checkpoints\tts\0718_e8_s400.pth')
    tts.tts_handle(
        text="家人们，今天这款五彩椒不是一般的蔬菜，快来抢购吧！",
        text_lang="zh",
        ref_audio_path="20250701.wav",
        prompt_text="哇，看到爱吃草莓的小仙女都来了，太给力了，今天所有的产品",
        prompt_lang="zh",
        text_split_method="cut5",
        save_path="1.wav"
    )