import os, uuid, base64, subprocess
import re
import time
from time import sleep
import asyncio
from subprocess import CalledProcessError
import traceback
from typing import List
import multiprocessing, threading
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from pydub import AudioSegment
from plus.libUtils import gc

import numpy as np
import sentencepiece as spm
import torch
import torchaudio
from torch.nn.utils.rnn import pad_sequence
from omegaconf import OmegaConf
from tqdm import tqdm

import warnings

warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)

from libs.BigVGAN.models import BigVGAN as Generator
from libs.utils.checkpoint import load_checkpoint
from libs.utils.feature_extractors import MelSpectrogramFeatures
from libs.utils.front import TextNormalizer, TextTokenizer
from plus.model_vllm import UnifiedVoice

import matplotlib.pyplot as plt

# def fade_in_out(wav, fade_in=int(24000*0.05), fade_out=int(24000*0.05)):
#     wav = wav.astype(np.float32)
#     print("wav", np.abs(wav).max(), np.abs(wav).mean(), np.abs(wav).min())
    
#     if fade_in > 0:
#         wav[:fade_in] *= np.linspace(0, 1, fade_in)[:, None]
    
#     if fade_out > 0:
#         wav[-fade_out:] *= np.linspace(1, 0, fade_out)[:, None]
    
#     wav = np.clip(wav, -32768, 32767).astype(np.int16)
#     wav = np.concatenate([np.zeros((int(0.4 * 24000), 1)), wav], axis=0).astype(np.int16)
#     return wav

def trim_and_pad_silence(wav_data, threshold=1000, min_silence=int(24000*0.4)):
    """Ensure the waveform ends with at least ``min_silence`` silent samples.

    Scans the (N, 1) int16 waveform from the back for the last sample whose
    amplitude reaches ``threshold``; if the trailing quiet region is shorter
    than ``min_silence`` samples, zero-padding is appended to reach it.

    Args:
        wav_data: (N, 1) integer waveform array.
        threshold: amplitude at or above which a sample counts as non-silent.
        min_silence: minimum number of trailing silent samples to guarantee.

    Returns:
        The (possibly padded) waveform as an int16 array.
    """
    amplitude = np.abs(wav_data).ravel()
    # Index one past the last sample whose amplitude reaches the threshold
    # (argmax on the reversed boolean mask finds the first hit from the end).
    tail_start = len(amplitude) - int(np.argmax(amplitude[::-1] >= threshold))
    trailing_silence = len(wav_data) - tail_start
    if trailing_silence >= min_silence:
        return wav_data.astype(np.int16)
    padding = np.zeros((min_silence - trailing_silence, 1))
    return np.vstack([wav_data, padding]).astype(np.int16)


class IndexTTS:
    def __init__(
        self,
        cfg_path="checkpoints/config.yaml",
        cache_dir="/dev/shm",  # os.getcwd()
        model_dir="checkpoints",
        gpu_memory_utilization=0.25,
        dtype="auto",
        max_num_seqs=128,
        is_fp16=True,
        device=None,
        use_cuda_kernel=None,
    ):
        """
        Args:
            cfg_path (str): path to the config file.
            cache_dir (str): directory where prompt files embedded in '.pt' bundles are materialized.
            model_dir (str): path to the model directory.
            gpu_memory_utilization (float): GPU memory fraction forwarded to the vLLM-backed GPT.
            dtype (str): dtype string forwarded verbatim to the GPT backend (e.g. "auto").
            max_num_seqs (int): maximum concurrent sequences for the GPT backend.
            is_fp16 (bool): whether to use fp16.
            device (str): device to use (e.g., 'cuda:0', 'cpu'). If None, it will be set automatically based on the availability of CUDA or MPS.
            use_cuda_kernel (None | bool): whether to use BigVGan custom fused activation CUDA kernel, only for CUDA device.
        """
        self.cache_dir = cache_dir
        # Per-session bookkeeping, keyed by session id.
        self.wavs = dict()          # generated waveform chunks per session
        self.gpt_gen_time = dict()  # cumulative GPT generation time per session
        self.bigvgan_time = dict()  # cumulative vocoder time per session
        self.tasks = dict()         # in-flight task markers
        self.tasks_fail = dict()    # failure flags set by workers
        # Device resolution priority: explicit argument > CUDA > MPS > CPU.
        if device is not None:
            self.device = device
            self.is_fp16 = False if device == "cpu" else is_fp16
            self.use_cuda_kernel = use_cuda_kernel is not None and use_cuda_kernel and device.startswith("cuda")
        elif torch.cuda.is_available():
            self.device = "cuda:0"
            self.is_fp16 = is_fp16
            self.use_cuda_kernel = use_cuda_kernel is None or use_cuda_kernel
        elif hasattr(torch, "mps") and torch.backends.mps.is_available():
            self.device = "mps"
            self.is_fp16 = False # float16 on MPS has more overhead than float32
            self.use_cuda_kernel = False
        else:
            self.device = "cpu"
            self.is_fp16 = False
            self.use_cuda_kernel = False
            print(">> Be patient, it may take a while to run in CPU mode.")

        self.cfg = OmegaConf.load(cfg_path)
        self.model_dir = model_dir
        self.dtype = torch.float16 if self.is_fp16 else None
        self.stop_mel_token = self.cfg.gpt.stop_mel_token

        # NOTE(review): the raw `dtype` string argument (not self.dtype) is
        # passed to the backend — confirm that is intended.
        self.gpt = UnifiedVoice(gpu_memory_utilization, dtype, max_num_seqs, **self.cfg.gpt, model_dir=model_dir)
        self.gpt_path = os.path.join(self.model_dir, self.cfg.gpt_checkpoint)
        load_checkpoint(self.gpt, self.gpt_path)
        self.gpt = self.gpt.to(self.device)
        # if self.is_fp16:
        #     self.gpt.eval().half()
        # else:
        #     self.gpt.eval()
        self.gpt.eval()
        print(">> GPT weights restored from:", self.gpt_path)

        if self.use_cuda_kernel:
            # preload the CUDA kernel for BigVGAN; fall back to torch on failure
            try:
                from libs.BigVGAN.alias_free_activation.cuda import load
                anti_alias_activation_cuda = load.load()
                print(">> Preload custom CUDA kernel for BigVGAN", anti_alias_activation_cuda)
            except Exception as ex:
                traceback.print_exc()
                print(">> Failed to load custom CUDA kernel for BigVGAN. Falling back to torch.")
                self.use_cuda_kernel = False
        self.bigvgan = Generator(self.cfg.bigvgan, use_cuda_kernel=self.use_cuda_kernel)
        self.bigvgan_path = os.path.join(self.model_dir, self.cfg.bigvgan_checkpoint)
        vocoder_dict = torch.load(self.bigvgan_path, map_location="cpu")
        self.bigvgan.load_state_dict(vocoder_dict["generator"])
        self.bigvgan = self.bigvgan.to(self.device)
        # remove weight norm on eval mode
        self.bigvgan.remove_weight_norm()
        self.bigvgan.eval()
        print(">> bigvgan weights restored from:", self.bigvgan_path)
        self.bpe_path = os.path.join(self.model_dir, "bpe.model")  # self.cfg.dataset["bpe_model"]
        self.normalizer = TextNormalizer()
        self.normalizer.load()
        print(">> TextNormalizer loaded")
        self.tokenizer = TextTokenizer(self.bpe_path, self.normalizer)
        print(">> bpe model loaded from:", self.bpe_path)

        # Registered speaker cache: name -> conditioning features (see registry_speaker).
        self.speaker_dict = {}
    
    def remove_long_silence(self, codes: list, latent: torch.Tensor, max_consecutive=15, silent_token=52):
        assert latent.dim() == 3 and latent.size(0) == 1, "Latent should be (1, seq_len, dim)"
        seq_len, dim = latent.size(1), latent.size(2)
        # print("latent", latent.shape)
        
        if self.stop_mel_token in codes:
            try:
                stop_idx = codes.index(self.stop_mel_token)
                valid_len = max(stop_idx - 1, 0)  # 保留至停止标记前一位
            except ValueError:
                valid_len = len(codes)
        else:
            valid_len = len(codes)
        
        valid_codes = codes[:min(valid_len, len(codes))]
        valid_latent = latent[0, :seq_len]  # 保持维度兼容性
        
        keep_indices = []
        silence_counter = 0
        
        for idx, token in enumerate(valid_codes):
            if token == silent_token:
                silence_counter += 1
            else:
                silence_counter = 0
            
            if silence_counter <= max_consecutive:
                keep_indices.append(idx)
        
        filtered_latent = valid_latent[keep_indices].unsqueeze(0)  # [1, new_seq, dim]
        # print("filtered_latent", filtered_latent.shape)
        return filtered_latent
    
    @torch.no_grad()
    async def worker(self, sessionid, sent, seed, auto_conditioning, sampling_params, speech_conditioning_latent, index=None):
        """Synthesize one sentence: GPT token generation -> latent -> BigVGAN waveform.

        The resulting CPU waveform tensor is appended to
        ``self.wavs[sessionid]`` (or stored at ``index`` when slots were
        pre-allocated for parallel runs). On failure the error is printed and
        ``self.tasks_fail[sessionid]`` is set instead of raising.

        Args:
            sessionid: key into the per-session bookkeeping dicts.
            sent: tokenized sentence to synthesize.
            seed: sampling seed, or None for non-seeded sampling.
            auto_conditioning: list of conditioning mel tensors for the vocoder.
            sampling_params: optional dict of sampling overrides (top_p, top_k, ...).
            speech_conditioning_latent: averaged speaker conditioning latent.
            index: optional pre-allocated slot in ``self.wavs[sessionid]``.
        """
        text_tokens = self.tokenizer.convert_tokens_to_ids(sent)
        text_tokens = torch.tensor(text_tokens, dtype=torch.int32, device=self.device).unsqueeze(0)
        m_start_time = time.perf_counter()
        # Configure the sampling parameters' seed
        if sampling_params is not None:
            self.gpt.samplingParamsUpdate(**sampling_params)
        if seed is not None:
            self.gpt.sampling_params.seed = int(seed)
        else:
            self.gpt.sampling_params.seed = None
        codes, latent = await self.gpt.inference_speech(
            speech_conditioning_latent,
            text_tokens,
            # cond_mel_lengths=torch.tensor([auto_conditioning.shape[-1]], device=text_tokens.device)
        )
        self.gpt_gen_time[sessionid] += time.perf_counter() - m_start_time

        # # remove ultra-long silence if exits
        # # temporarily fix the long silence bug.
        # latent = self.remove_long_silence(codes, latent)

        codes = torch.tensor(codes, dtype=torch.long, device=self.device).unsqueeze(0)
        code_lens = torch.tensor([codes.shape[-1]], device=codes.device, dtype=codes.dtype)
        try:
            # Forward pass through the GPT to obtain the latent fed to the vocoder.
            latent = self.gpt(
                speech_conditioning_latent, text_tokens,
                torch.tensor([text_tokens.shape[-1]], device=text_tokens.device), codes,
                code_lens*self.gpt.mel_length_compression,
                cond_mel_lengths=torch.tensor([speech_conditioning_latent.shape[-1]], device=text_tokens.device),
                return_latent=True, clip_inputs=False
            )
            m_start_time = time.perf_counter()
            wav, _ = self.bigvgan(latent, [ap_.transpose(1, 2) for ap_ in auto_conditioning])
            self.bigvgan_time[sessionid] += time.perf_counter() - m_start_time
            wav = wav.squeeze(1)
            # Scale to the int16 range and clamp to avoid overflow on conversion.
            wav = torch.clamp(32767 * wav, -32767.0, 32767.0)
            print(f">> {sessionid} --> wav shape: {wav.shape}", "min:", wav.min(), "max:", wav.max())
            
            wav_cpu = wav.detach().cpu()
            # Detach from the autograd graph before moving to CPU: .detach() returns a tensor that shares storage but is cut off from the graph, so the subsequent .cpu() yields a plain CPU tensor holding no reference to GPU memory.
            
            # self.wavs[sessionid].append(wav[:, :-512])
            if index is None or len(self.wavs[sessionid]) <= index:
                self.wavs[sessionid].append(wav_cpu)  # to cpu before
            else:
                self.wavs[sessionid][index]=wav_cpu
        except torch.cuda.OutOfMemoryError as ex:
            print("="*50)
            print("Caught an OutOfMemoryError!")
            print("Reason: GPU ran out of memory.")
            print("Error details:", ex)
            print("="*50)
            self.tasks_fail[sessionid] = True
        except Exception as ex:
            # Best-effort: record the failure so callers can skip remaining work.
            print("Error details:", ex)
            self.tasks_fail[sessionid] = True
        gc()
    def dyz(self, text):
        """Replace interjection characters with pinyin-style pronunciation tags.

        Substitutions are applied in a fixed order so multi-character patterns
        behave exactly like chained ``str.replace`` calls.
        """
        substitutions = (
            ("嗯", "EN4"),
            ("嘿", "HEI1"),
            ("嗨", "HAI4"),
            ("哈哈", "HA1HA1"),
        )
        for pattern, replacement in substitutions:
            text = text.replace(pattern, replacement)
        return text
    
    def torchLoadAudio(self, ap_, sampling_rate = 24000):
        """Load an audio file and return its mel spectrogram on ``self.device``.

        The audio is mixed down to mono, resampled to ``sampling_rate``, and
        converted with ``MelSpectrogramFeatures``.

        Args:
            ap_: path to the audio file (anything torchaudio can load).
            sampling_rate: target sample rate for resampling.

        Returns:
            Conditioning mel tensor on ``self.device``.
        """
        audio, sr = torchaudio.load(ap_)
        # Mix all channels down to mono, keeping a (1, T) shape. (The old
        # `if audio.shape[0] > 1` follow-up was dead code: after keepdim=True
        # mean over dim 0 the channel dimension is always 1.)
        audio = torch.mean(audio, dim=0, keepdim=True)
        audio = torchaudio.transforms.Resample(sr, sampling_rate)(audio)
        cond_mel = MelSpectrogramFeatures()(audio).to(self.device)
        return cond_mel

    def readPtFile(self, pt_file):
        data = torch.load(pt_file)
        data = {
            "file_name": data['file_name'] if 'file_name' in data else None,
            "file_data": data['file_data'] if 'file_data' in data else None,
            "seed": int(data['seed'] if 'seed' in data else -1),
            "top_p": float(data['top_p'] if 'top_p' in data else 0.8),
            "top_k": int(data['top_k'] if 'top_k' in data else 30),
            "temperature": float(data['temperature'] if 'temperature' in data else 1),
            "repetition_penalty": float(data['repetition_penalty'] if 'repetition_penalty' in data else 10),
            "max_tokens": int(data['max_tokens'] if 'max_tokens' in data else 600),
            "cond_mel": data['cond_mel'] if 'cond_mel' in data else None,
        }
        filaPath = None
        if 'file_data' in data and data['file_data'] is not None:
            cache_dir2 = os.path.join(f'{self.cache_dir}', 'pt_file')
            if os.path.exists(cache_dir2) is False:
                os.makedirs(cache_dir2)
            filaPath = os.path.join(cache_dir2, os.path.basename(data['file_name']))
            if isinstance(data['file_data'], str):
                if data['file_data'].find(';base64,') > -1:
                    cache_dir2 = os.path.join(f'{self.cache_dir}', 'pt_file')
                    if os.path.exists(cache_dir2) is False:
                        os.makedirs(cache_dir2)
                    filaPath = os.path.join(cache_dir2, os.path.basename(data['file_name']))
                    if os.path.exists(filaPath) is False:
                        data['file_data'] = data['file_data'].split(';base64,')[1]
                        data['file_data'] = base64.b64decode(data['file_data'])
            if os.path.exists(filaPath) is False and isinstance(data['file_data'], bytes):
                with open(filaPath, 'wb') as f:
                    f.write(data['file_data'])
        data['file'] = filaPath if os.path.exists(filaPath) else None
        return data

    def convert_wav_to_aac(self, input_wav_file, output_aac_file, bitrate='22k'):
        """
        Convert a WAV file to AAC using ffmpeg.

        A non-zero ffmpeg exit code is caught and printed (best-effort); note
        that a missing ffmpeg binary raises FileNotFoundError to the caller.

        :param input_wav_file: The input WAV file path.
        :param output_aac_file: The output AAC file path.
        :param bitrate: Target audio bitrate passed to ffmpeg's ``-b:a`` (e.g. '22k').
        """
        command = [
            'ffmpeg',
            '-i', input_wav_file,
            # '-hide_banner',
            '-nostats', '-err_detect', 'ignore_err', '-ignore_unknown', '-loglevel', 'error', '-y',
            '-acodec', 'aac',  # Specify the audio codec as AAC
            '-b:a', bitrate,  # Specify the bitrate (optional)
            output_aac_file
        ]

        try:
            # check=True raises CalledProcessError on a non-zero exit code;
            # the unused `result` binding was removed.
            subprocess.run(command, check=True)
        except subprocess.CalledProcessError as e:
            print(f"An error occurred: {e}")
            
    async def infer_stream(
        self,
        audio_prompt:list,
        text:str ="",
        infer_mode:str ="流式推理",
        verbose:bool=False,
        seed:int=None,
        max_text_tokens_per_sentence:int=150,
        sentences_bucket_max_size:int=4,
        sessionid:str=None,
        output_path:str=None,
        over_write:bool=False, 
        sampling_params=None
    ):
        """Streaming inference: synthesize ``text`` sentence by sentence.

        Yields one chunk per sentence — either the path of the per-sentence
        output file (when ``output_path`` is set; '.mp3' / '.m3u8' extensions
        map to per-chunk '_<i>.mp3' / '_<i>.aac' files) or a
        ``(sampling_rate, np.int16 array)`` tuple.

        Args:
            audio_prompt: reference audio paths; '.pt' bundles may also carry
                seed/sampling settings and a precomputed mel.
            text: text to synthesize.
            verbose / infer_mode / sentences_bucket_max_size: unused here;
                kept for interface parity with ``infer``.
            seed: sampling seed; falls back to the '.pt' bundle value when None.
            max_text_tokens_per_sentence: sentence split budget; None uses the
                tokenizer default.
            sessionid: task key; generated when None. If the id is already in
                flight, ``output_path`` is yielded unchanged.
            output_path: target file; None yields raw arrays instead.
            over_write: when False, already-rendered chunk files are reused.
        """
        if sessionid is None:
            sessionid = str(uuid.uuid4())
        output_path_ = output_path
        self.tasks_fail[sessionid] = False
        if sessionid in self.tasks:
            # Another task already owns this session id: yield the path as-is.
            if output_path_ is not None:
                yield output_path_
        else:
            self.tasks[sessionid] = 1
            text = self.dyz(text)
            sampling_rate, target_sr = 24000, 22050
            default_data = None
            print(">> start inference...")
            start_time = time.perf_counter()
            auto_conditioning = []
            for ap_ in audio_prompt:
                cond_mel = None
                if ap_.endswith('.pt'):
                    # '.pt' prompt bundles can carry sampling settings and a mel.
                    data = self.readPtFile(ap_)
                    file = data['file'] if 'file' in data else None
                    if seed is None:
                        seed = data['seed']
                    if sampling_params is None:
                        sampling_params = dict(
                            top_p = data['top_p'],
                            top_k = data['top_k'],
                            temperature = data['temperature'],
                            repetition_penalty = data['repetition_penalty'],
                            max_tokens = data['max_tokens']
                        )
                    cond_mel = data['cond_mel']
                    if cond_mel is None and file is not None:
                        cond_mel = self.torchLoadAudio(file, sampling_rate)
                else:
                    cond_mel = self.torchLoadAudio(ap_, sampling_rate)
                    # cond_mel_frame = cond_mel.shape[-1]
                if cond_mel is not None:
                    auto_conditioning.append(cond_mel)
            print(f">> audio_prompt Parsing completed: {time.perf_counter()-start_time:.2f} seconds")

            text_tokens_list = self.tokenizer.tokenize(text)
            if max_text_tokens_per_sentence is None:
                sentences = self.tokenizer.split_sentences(text_tokens_list)
            else:
                sentences = self.tokenizer.split_sentences(text_tokens_list, max_text_tokens_per_sentence)
            # lang = "EN"
            # lang = "ZH"
            # Average the conditioning latents of all reference prompts.
            speech_conditioning_latent = []
            for cond_mel in auto_conditioning:
                speech_conditioning_latent_ = self.gpt.get_conditioning(
                    cond_mel,  # .half()
                    torch.tensor([cond_mel.shape[-1]], device=self.device)
                )
                speech_conditioning_latent.append(speech_conditioning_latent_)
            speech_conditioning_latent = torch.stack(speech_conditioning_latent).sum(dim=0)
            speech_conditioning_latent = speech_conditioning_latent / len(auto_conditioning)

            processes = []
            self.wavs[sessionid] = []
            self.gpt_gen_time[sessionid] = 0
            self.bigvgan_time[sessionid] = 0
            for index in range(len(sentences)):
                sent = sentences[index]
                # Reset the buffer so each yielded chunk holds only this sentence.
                self.wavs[sessionid] = []
                isExists = False
                output_path_ = output_path
                if output_path_ is not None:
                    if output_path_.endswith('.mp3'):
                        output_path_ = output_path_.replace('.mp3', f'_{index}.mp3')
                    elif output_path_.endswith('.m3u8'):
                        output_path_ = output_path_.replace('.m3u8', f'_{index}.aac')
                    if os.path.exists(output_path_) and over_write == False:
                        # Chunk already rendered previously: skip synthesis.
                        isExists = True
                wav = None
                end_time = time.perf_counter()
                # print(f'{index} --> ', output_path_, isExists)
                if isExists == False:
                    print(f">> Generated audio begin")
                    await self.worker(sessionid, sent, seed, auto_conditioning, sampling_params, speech_conditioning_latent)
                    wav = torch.cat(self.wavs[sessionid], dim=1)
                    wav_length = wav.shape[-1] / sampling_rate
                    end_time = time.perf_counter()
                    # save audio
                    wav = wav.cpu()  # to cpu
                    wav_data = wav.type(torch.int16)
                    wav_data = wav_data.numpy().T
                    wav_data = trim_and_pad_silence(wav_data)
                    print(f">> Generated audio length: {wav_length:.2f} seconds")
                    print(f">> RTF: {(end_time - start_time) / wav_length:.4f}")
                print(f">> gpt_gen_time: {self.gpt_gen_time[sessionid]:.2f} seconds")
                print(f">> bigvgan_time: {self.bigvgan_time[sessionid]:.2f} seconds")
                print(f">> Chunk inference time: {end_time - start_time:.2f} seconds")
                if output_path:
                    # Save the audio chunk directly to the requested path.
                    if os.path.isfile(output_path) and over_write == True:
                        os.remove(output_path)
                        print(">> remove old wav file:", output_path)
                    print('>> os.path.dirname(output_path) --> ', os.path.dirname(output_path))
                    if os.path.dirname(output_path) != "":
                        os.makedirs(os.path.dirname(output_path), exist_ok=True)
                    if wav is not None:
                        # Write a temporary per-chunk wav, then transcode if needed.
                        wav_file_path = output_path.replace('.m3u8', f'_{index}.wav')
                        wav_file_path = wav_file_path.replace('.mp3', f'_{index}.wav')
                        torchaudio.save(wav_file_path, wav.type(torch.int16), sampling_rate)
                        if output_path.endswith('.mp3'):
                            output_path_ = output_path.replace('.mp3', f'_{index}.mp3')
                            audio = AudioSegment.from_wav(wav_file_path)
                            audio.export(output_path_, format="mp3")
                            os.remove(wav_file_path)
                            print(">> mp3 file saved to:", output_path_)
                        elif output_path.endswith('.m3u8'):
                            output_path_ = output_path.replace('.m3u8', f'_{index}.aac')
                            self.convert_wav_to_aac(wav_file_path, output_path_)
                            # audio = AudioSegment.from_wav(wav_file_path)
                            # audio.export(output_path_, format="aac")
                            os.remove(wav_file_path)
                            print(">> aac file saved to:", output_path_)
                        else:
                            output_path_ = wav_file_path
                            print(">> wav file saved to:", wav_file_path)
                    yield output_path_
                else:
                    yield (sampling_rate, wav_data)

            torch.cuda.empty_cache()
            # NOTE(review): self.tasks_fail[sessionid] is never deleted here
            # (unlike infer) — possible slow dict growth; confirm intended.
            del self.tasks[sessionid]
            del self.wavs[sessionid]
            del self.gpt_gen_time[sessionid]
            del self.bigvgan_time[sessionid]

    async def infer(
        self,
        multiple_speaker:bool,
        audio_prompt:list,
        text:str="",
        infer_mode:str="普通推理",
        output_path:str=None,
        verbose:bool=False,
        seed:int=None,
        max_text_tokens_per_sentence:int=150,
        sentences_bucket_max_size:int=4,
        sessionid:str=None,
        sampling_params=None
    ):
        """Synthesize ``text`` (or per-speaker scripts) into a single waveform.

        Args:
            multiple_speaker: when True, ``audio_prompt`` holds per-speaker
                tuples ``(prompt_audio, text)`` or 9-tuples that also carry
                sampling settings; when False, all prompts condition one voice
                for ``text``.
            audio_prompt: reference audio paths ('.pt' bundles or audio files).
            text: text to synthesize (single-speaker mode).
            infer_mode: '并行推理' runs sentence workers concurrently in buckets
                of ``sentences_bucket_max_size``; any other value runs them
                sequentially.
            output_path: if set, audio is saved there ('.mp3' is transcoded via
                a temporary '.wav') and the path returned; otherwise a
                ``(sampling_rate, np.int16 array)`` tuple is returned for Gradio.
            verbose: unused; kept for interface compatibility.
            seed: sampling seed; None falls back to the '.pt' bundle value.
            sessionid: task key; generated when None.
            sampling_params: optional dict of sampling overrides.

        Returns:
            ``output_path`` (str) or ``(sampling_rate, np.ndarray)``.
        """
        if sessionid is None:
            sessionid = str(uuid.uuid4())
        # Wait for any earlier task still holding this session id. Fixed:
        # time.sleep() would block the event loop inside a coroutine, so use
        # asyncio.sleep() to yield control while polling.
        while sessionid in self.tasks:
            await asyncio.sleep(1)
        self.tasks[sessionid] = 1
        if text is not None:
            text = self.dyz(text)
        print(">> start inference...")
        start_time = time.perf_counter()
        sampling_rate, target_sr = 24000, 22050
        default_data = None

        self.wavs[sessionid] = []
        self.gpt_gen_time[sessionid] = 0
        self.bigvgan_time[sessionid] = 0
        self.tasks_fail[sessionid] = False
        if multiple_speaker == False:
            auto_conditioning = []
            for ap_ in audio_prompt:
                cond_mel = None
                if ap_.endswith('.pt'):
                    # '.pt' prompt bundles can carry sampling settings and a mel.
                    data = self.readPtFile(ap_)
                    file = data['file'] if 'file' in data else None
                    if seed is None:
                        seed = data['seed']
                    if sampling_params is None:
                        sampling_params = dict(
                            top_p = data['top_p'],
                            top_k = data['top_k'],
                            temperature = data['temperature'],
                            repetition_penalty = data['repetition_penalty'],
                            max_tokens = data['max_tokens']
                        )
                    cond_mel = data['cond_mel']
                    if cond_mel is None and file is not None:
                        cond_mel = self.torchLoadAudio(file, sampling_rate)
                else:
                    cond_mel = self.torchLoadAudio(ap_, sampling_rate)
                if cond_mel is not None:
                    auto_conditioning.append(cond_mel)
            print(f">> audio_prompt Parsing completed: {time.perf_counter()-start_time:.2f} seconds")

            text_tokens_list = self.tokenizer.tokenize(text)
            if max_text_tokens_per_sentence is None:
                sentences = self.tokenizer.split_sentences(text_tokens_list)
            else:
                sentences = self.tokenizer.split_sentences(text_tokens_list, max_text_tokens_per_sentence)
            # Average the conditioning latents of all reference prompts.
            speech_conditioning_latent = []
            for cond_mel in auto_conditioning:
                speech_conditioning_latent_ = self.gpt.get_conditioning(
                    cond_mel,  # .half()
                    torch.tensor([cond_mel.shape[-1]], device=self.device)
                )
                speech_conditioning_latent.append(speech_conditioning_latent_)
            speech_conditioning_latent = torch.stack(speech_conditioning_latent).sum(dim=0)
            speech_conditioning_latent = speech_conditioning_latent / len(auto_conditioning)

            if infer_mode == '并行推理':
                async def workers():
                    nonlocal sentences, seed, auto_conditioning, sampling_params, speech_conditioning_latent
                    tasks = []
                    results = []  # fixed: was unbound when `sentences` is empty
                    for sent in sentences:
                        # Pre-allocate a slot so parallel workers write in order.
                        self.wavs[sessionid].append(default_data)
                        tasks.append(self.worker(sessionid, sent, seed, auto_conditioning, sampling_params, speech_conditioning_latent, len(self.wavs[sessionid]) - 1))
                        if sentences_bucket_max_size == len(tasks):
                            results = await asyncio.gather(*tasks)
                            tasks = []
                    if len(tasks) > 0:
                        results = await asyncio.gather(*tasks)
                    return results
                await workers()
            else:
                for sent in sentences:
                    # Stop early once any worker has flagged a failure.
                    tasks_fail = self.tasks_fail[sessionid] if sessionid in self.tasks_fail else False
                    if tasks_fail == False:
                        await self.worker(sessionid, sent, seed, auto_conditioning, sampling_params, speech_conditioning_latent)
        else:
            tasks = []
            for index in range(len(audio_prompt)):
                speaker = audio_prompt[index]
                auto_conditioning = []
                seed = -1
                # Fixed: these defaults were swapped (top_p=30, top_k=0.8);
                # every other code path defaults top_p to 0.8 and top_k to 30.
                top_p = 0.8
                top_k = 30
                temperature = 1
                repetition_penalty = 10
                max_tokens = 600
                cond_mel = None
                getSettings = False
                input_text_single = ''
                if len(speaker) == 2:
                    prompt_audio = speaker[0]
                    input_text_single = f"{' ' if index > 0 else ''}{speaker[1]}"
                elif len(speaker) == 9:
                    input_text_single = f"{' ' if index > 0 else ''}{speaker[1]}"
                    prompt_audio = speaker[2]
                    seed = speaker[3]
                    top_p = speaker[4]
                    top_k = speaker[5]
                    temperature = speaker[6]
                    repetition_penalty = speaker[7]
                    max_tokens = speaker[8]
                    getSettings = True
                else:
                    raise ValueError(f"The data format is incorrect!!!")
                if prompt_audio.endswith('.pt'):
                    data = self.readPtFile(prompt_audio)
                    file = data['file']
                    cond_mel = data['cond_mel']
                    # NOTE(review): unlike the single-speaker path, a '.pt'
                    # bundle without a precomputed mel is skipped here even when
                    # `file` carries audio — confirm this is intended.
                    if getSettings==False:
                        seed = data['seed']
                        top_p = data['top_p']
                        top_k = data['top_k']
                        temperature = data['temperature']
                        repetition_penalty = data['repetition_penalty']
                        max_tokens = data['max_tokens']
                else:
                    cond_mel = self.torchLoadAudio(prompt_audio, sampling_rate)
                sampling_params = dict(
                    top_p = top_p,
                    top_k = top_k,
                    temperature = temperature,
                    repetition_penalty = repetition_penalty,
                    max_tokens = max_tokens,
                )
                if cond_mel is not None:
                    auto_conditioning = [cond_mel]
                    speech_conditioning_latent = []
                    text_tokens_list = self.tokenizer.tokenize(input_text_single)
                    if max_text_tokens_per_sentence is None:
                        sentences = self.tokenizer.split_sentences(text_tokens_list)
                    else:
                        sentences = self.tokenizer.split_sentences(text_tokens_list, max_text_tokens_per_sentence)
                    for cond_mel in auto_conditioning:
                        speech_conditioning_latent_ = self.gpt.get_conditioning(
                            cond_mel,  # .half()
                            torch.tensor([cond_mel.shape[-1]], device=self.device)
                        )
                        speech_conditioning_latent.append(speech_conditioning_latent_)
                    speech_conditioning_latent = torch.stack(speech_conditioning_latent).sum(dim=0)
                    speech_conditioning_latent = speech_conditioning_latent / len(auto_conditioning)
                    if infer_mode == '并行推理':
                        for sent in sentences:
                            self.wavs[sessionid].append(default_data)
                            tasks.append(self.worker(sessionid, sent, seed, auto_conditioning, sampling_params, speech_conditioning_latent, len(self.wavs[sessionid]) - 1))
                    else:
                        for sent in sentences:
                            tasks_fail = self.tasks_fail[sessionid] if sessionid in self.tasks_fail else False
                            if tasks_fail == False:
                                await self.worker(sessionid, sent, seed, auto_conditioning, sampling_params, speech_conditioning_latent)
            if infer_mode == '并行推理':
                async def multiple_workers():
                    nonlocal tasks
                    tasks_step = []
                    for task in tasks:
                        tasks_step.append(task)
                        if sentences_bucket_max_size == len(tasks_step):
                            tasks_fail = self.tasks_fail[sessionid] if sessionid in self.tasks_fail else False
                            if tasks_fail == False:
                                await asyncio.gather(*tasks_step)
                                tasks_step = []
                    tasks_fail = self.tasks_fail[sessionid] if sessionid in self.tasks_fail else False
                    if tasks_fail == False and len(tasks_step) > 0:
                        await asyncio.gather(*tasks_step)
                await multiple_workers()
        
        # Drop slots left empty by failed workers before concatenation.
        wavs = [wav for wav in self.wavs[sessionid] if wav is not None]
        torch.cuda.empty_cache()
        end_time = time.perf_counter()
        wav = torch.cat(wavs, dim=1)
        wav_length = wav.shape[-1] / sampling_rate
        if infer_mode != '并行推理':
            print(f">> gpt_gen_time: {self.gpt_gen_time[sessionid]:.2f} seconds")
            print(f">> bigvgan_time: {self.bigvgan_time[sessionid]:.2f} seconds")
        print(f">> Total inference time: {end_time - start_time:.2f} seconds")
        print(f">> Generated audio length: {wav_length:.2f} seconds")
        print(f">> RTF: {(end_time - start_time) / wav_length:.4f}")

        del self.tasks[sessionid]
        del self.tasks_fail[sessionid]
        del self.wavs[sessionid]
        del self.gpt_gen_time[sessionid]
        del self.bigvgan_time[sessionid]
        # save audio
        wav = wav.cpu()  # to cpu
        if output_path:
            # Save the audio directly to the requested path.
            if os.path.isfile(output_path):
                os.remove(output_path)
                print(">> remove old wav file:", output_path)
            if os.path.dirname(output_path) != "":
                os.makedirs(os.path.dirname(output_path), exist_ok=True)
            if output_path.endswith('.mp3'):
                wav_file_path = output_path.replace('.mp3', '.wav')
                torchaudio.save(wav_file_path, wav.type(torch.int16), sampling_rate)
                audio = AudioSegment.from_wav(wav_file_path)
                audio.export(output_path, format="mp3")
                print(">> mp3 file saved to:", output_path)
                os.remove(wav_file_path)
            else:
                torchaudio.save(output_path, wav.type(torch.int16), sampling_rate)
                print(">> wav file saved to:", output_path)
            return output_path
        else:
            # Return in the (rate, ndarray) format expected by Gradio.
            wav_data = wav.type(torch.int16)
            wav_data = wav_data.numpy().T
            wav_data = trim_and_pad_silence(wav_data)
            return (sampling_rate, wav_data)
        
    @torch.no_grad()
    def registry_speaker(self, speaker: str, audio_paths: List[str]):
        """Precompute and cache conditioning features for a named speaker.

        Each reference audio file is converted to a conditioning mel and the
        per-prompt GPT conditioning latents are averaged into a single speaker
        latent; both are stored in ``self.speaker_dict[speaker]``.

        Args:
            speaker: name under which the speaker is registered.
            audio_paths: list of reference audio file paths.
        """
        # Reuse the shared loader instead of duplicating the
        # load / mono-mixdown / resample / mel pipeline inline.
        auto_conditioning = [self.torchLoadAudio(ap_, 24000) for ap_ in audio_paths]

        speech_conditioning_latent = []
        for cond_mel in auto_conditioning:
            speech_conditioning_latent_ = self.gpt.get_conditioning(
                cond_mel,  # .half()
                torch.tensor([cond_mel.shape[-1]], device=self.device)
            )
            speech_conditioning_latent.append(speech_conditioning_latent_)
        # Average the per-prompt latents into one speaker embedding.
        speech_conditioning_latent = torch.stack(speech_conditioning_latent).sum(dim=0)
        speech_conditioning_latent = speech_conditioning_latent / len(auto_conditioning)

        self.speaker_dict[speaker] = {
            "auto_conditioning": auto_conditioning,
            "speech_conditioning_latent": speech_conditioning_latent
        }
        print(f"Speaker: {speaker} registered")
