import os
import warnings
import shutil
import torch
import io, base64
import logging
from datetime import datetime

# Silence ModelScope's internal logging before any modelscope import runs.
os.environ['MODELSCOPE_LOG_LEVEL'] = str(logging.ERROR)
# Required environment variables — a missing one fails fast with KeyError at import time.
verbose = os.environ['SERVICE_VERBOSE']
tts_model_dir = os.environ['TTS_MODEL_DIR']
denoise_model_dir = os.environ['DENOISE_MODEL_DIR']
ttsDefaultValue = os.environ['TTS_DEFAULT']  # JSON string of TTS defaults; parsed later in getService()
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
current_dir = os.path.dirname(os.path.abspath(__file__))
# Greeting sent to each new WebSocket client; the 40s matches the idle timer armed in accept().
wsWelcome = f'If there is no interaction data within 40 seconds, connection will automatically disconnect'
cache_dir = '/dev/shm' # os.getcwd()  # RAM-backed scratch dir (Linux tmpfs)

import sys
import threading, multiprocessing
import time
import json
import random
import argparse
import asyncio
import numpy as np
import soundfile as sf
from mutagen.aac import AAC
from plus.denoise import DeNoise
from contextlib import asynccontextmanager
from fastapi import FastAPI, Request, Response, WebSocket, UploadFile, HTTPException, BackgroundTasks # type: ignore
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles # type: ignore
from pydantic import BaseModel, Field # type: ignore
from sse_starlette.sse import EventSourceResponse # type: ignore
from starlette.middleware.base import BaseHTTPMiddleware # type: ignore
from starlette.requests import Request # type: ignore
from starlette.responses import FileResponse, Response # type: ignore
from starlette.middleware.gzip import GZipMiddleware # type: ignore
from starlette.websockets import WebSocketState, WebSocketDisconnect # type: ignore
from plus.libUtils import gc, logLib, timeStamp, clear, clearCache, clear__pycache__, uuid, toMd5, b64Encode, b64Decode, media2type, file2Dir, gpusInfo, catchError, SharedMemoryManager, send_message, taskEnd, generate_random_numbers, chkinput, isNaN, getParams, makeRequest, memcache, cacheBasePath, getCache

cache_file = os.path.join(cacheBasePath, 'cache')
# In-memory task registries, one bucket per service endpoint type.
tasks = {
    "tts": {},
    "tts_ws": {},
    "asr": {}
}
# Model singletons; populated later by getService() depending on SERVICE_MODULE.
ASRModel = None
TTSModel = None
@asynccontextmanager
async def lifespan(app: FastAPI):  # collects GPU memory
    """FastAPI lifespan hook: on application shutdown, run gc() to reclaim memory."""
    yield
    gc()
app = FastAPI(lifespan=lifespan)
app.add_middleware(
    GZipMiddleware,
    # gzip middleware: only compress responses larger than 1000 bytes
    minimum_size=1000,
)
app.add_middleware(
    CORSMiddleware,
    # Origins allowed for cross-origin requests, e.g. ["http://www.example.org"]; ["*"] allows any origin
    allow_origins=["*"],
    # Whether cross-origin requests may carry cookies (default False).
    # If True, allow_origins must list concrete origins and cannot be ["*"].
    allow_credentials=False,
    # HTTP methods allowed for cross-origin requests (default ["GET"])
    allow_methods=["*"],
    # Request headers allowed for cross-origin requests (default []); ["*"] allows all.
    # Accept, Accept-Language, Content-Language and Content-Type are always allowed.
    allow_headers=["*"],
    # Response headers exposed to the browser (default []; rarely specified)
    expose_headers=["*"]
    # max_age=1000
    # How long (seconds) the browser may cache the CORS preflight response; default 600
)

def isTrue(value):
    """Loosely interpret *value* as a boolean flag.

    Accepts the strings 'true' (any case) and '1', the bool True and the
    int 1; anything else — including None — is False.

    Bug fix: the original unconditionally called value.lower(), which raised
    AttributeError for non-string inputs (True, 1, None) before the other
    comparisons could ever run.
    """
    if isinstance(value, str):
        return value.lower() == 'true' or value == '1'
    return value == True or value == 1

def clearDict(obj:dict) -> dict:
    """Return a shallow copy of *obj* with every None-valued entry dropped."""
    return {key: value for key, value in obj.items() if value is not None}

def getMessage(message):
    """
    从传入的消息中提取所需的内容。

    如果消息是 JSON 格式的字符串，会尝试解析为字典；
    如果消息本身就是字典，直接处理。
    最终从解析后的字典中提取 `choices` 列表中第一个元素的 `message` 字段下的 `content` 值。

    :param message: 待处理的消息，可以是字符串或字典
    :return: 提取的内容字符串，如果提取失败则返回空字符串
    """
    result = ''
    # 检查消息是否为字符串或字典类型
    if isinstance(message, str) or isinstance(message, dict):
        if isinstance(message, str):
            if (message.startswith('data:')):
                message = message[5:]
            message = message.strip()
            if message == '[DONE]':
                return message
            # 检查字符串是否为 JSON 格式（列表或字典）
            if (message.startswith('[') and message.endswith(']')) or (message.startswith('{') and message.endswith('}')):
                try:
                    # 尝试将 JSON 字符串解析为字典
                    result = json.loads(message)
                except json.JSONDecodeError as e:
                    print(e)
                    pass
        elif isinstance(message, dict):
            result = message
        if isinstance(result, dict) and 'choices' in message and len(result['choices']) > 0:
            # 从解析后的字典中提取所需内容
            result = result['choices'][0]
            if isinstance(result, dict) and 'message' in result and 'content' in result['message']:
                result = result['message']['content']
            elif isinstance(result, dict) and 'delta' in result and 'content' in result['delta']:
                result = result['delta']['content']
            else:
                result = ''
    return result

async def getRequestItems(request: Request, uploadPath:str = '/upload', upload2Base64: bool = True):
    """
    Collect all parameters of an incoming request into a single dict.

    Handles JSON bodies, urlencoded forms and multipart forms.  Uploaded
    files are written under {cacheBasePath}{uploadPath}/<YYYY-MM-DD>/ and
    exposed either as a cache-relative path or, when upload2Base64 is True,
    as a base64-encoded string (via the project b64Encode helper).
    Query-string parameters are merged in last and therefore override body
    fields with the same name.

    :param request: incoming Starlette/FastAPI request
    :param uploadPath: sub-directory (below cacheBasePath) for uploaded files
    :param upload2Base64: when True, return file content base64-encoded
        instead of the stored file path
    :return: dict of merged request parameters
    """
    query = dict(request.query_params)
    data = {}
    if request.method != "GET" and "Content-Type" in request.headers:
        content_type = request.headers["Content-Type"]
        if content_type.startswith("application/json"):
            data = await request.json()
        elif content_type.startswith("application/x-www-form-urlencoded"):
            form_data = await request.form()
            data = {key: value for key, value in form_data.items()}
        elif content_type.startswith("multipart/form-data"):
            form_data = await request.form()
            files = {}
            values = {}
            # Bug fix: the original reassigned `uploadPath` from itself inside
            # the loop (uploadPath = f"{cacheBasePath}{uploadPath}/..."), so a
            # second uploaded file compounded the prefix into a wrong nested
            # path.  Compute the target directory once, in its own variable.
            upload_dir = f"{cacheBasePath}{uploadPath}/{datetime.now().strftime('%Y-%m-%d')}"
            for field, value in form_data.items():
                if hasattr(value, 'content_type'):
                    # File field: persist the upload into the cache directory.
                    ext = value.filename.split('.')[-1]
                    os.makedirs(upload_dir, exist_ok=True)
                    file_path = f"{upload_dir}/{timeStamp()}.{ext}"
                    content = await value.read()
                    with open(file_path, "wb") as f:
                        f.write(content)
                    files[field] = file_path.replace(cacheBasePath, '')
                    if upload2Base64 == True:
                        files[field] = b64Encode(content, ext)
                else:
                    # Plain form field; 'data'/'json' fields may carry JSON.
                    if field == 'data' or field == 'json':
                        try:
                            values[field] = json.loads(value)
                        except (json.JSONDecodeError, TypeError):
                            values[field] = value
                    else:
                        values[field] = value
            data = {
                **values,
                **files
            }
    # Query parameters override body fields of the same name.
    for key, value in query.items():
        data[key] = value
    return data

def requestGet(requests, item, defaultValue = ''):
    """Look up *item* in the *requests* mapping, falling back to *defaultValue*."""
    if item not in requests:
        return defaultValue
    return requests[item]

def getMaxMin(value, default, min, max, valueType=None):
    """Clamp a numeric *value* into [min, max] after casting it.

    Returns *default* when value is None or not an int/float.  The cast type
    is *valueType* when given, otherwise the type of *min*.
    (Parameter names `min`/`max` shadow builtins but are part of the public
    signature and kept for compatibility.)
    """
    if value is None:
        return default
    if not isinstance(value, (int, float)):
        return default
    cast = valueType if valueType is not None else type(min)
    value = cast(value)
    if value < min:
        return min
    if value > max:
        return max
    return value

def response(code, result, useTime):
    """Build the service's standard JSON envelope {code, data, useTime}.

    Runs gc() (project helper) to reclaim memory before responding.
    The HTTP status code mirrors the application-level `code`.
    (Name shadows an unrelated builtin-free identifier; kept for callers.)
    """
    gc()
    return JSONResponse(
        content={'code':code,'data':result,'useTime':useTime},
        status_code=code,
        headers={"x-token": "boatToken"},
        media_type="application/json" #"text/html"
    )

def getTaskId():
    """Generate a task id: MD5 of the current timestamp plus a random 4-digit suffix."""
    suffix = random.randrange(1000, 9999)
    return toMd5(f'{timeStamp()}{suffix}')

class WebSocketManager:
    """Tracks active WebSocket connections and pushes messages to them."""

    def __init__(self):
        # Bug fix: the original annotated this as List[WebSocket], but
        # `typing.List` is never imported in this file; PEP 526 evaluates
        # annotations on attribute targets, so constructing the manager
        # raised NameError.  Use the builtin `list` instead.
        self.active_connections: list = []

    async def connect(self, websocket: WebSocket):
        """Accept the handshake and register the connection."""
        await websocket.accept()
        self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        """Forget a connection; ignore sockets that were never registered
        (the original raised ValueError on a double disconnect)."""
        if websocket in self.active_connections:
            self.active_connections.remove(websocket)

    async def send_personal_message(self, message: any, websocket: WebSocket):
        """Send str as text, dict as JSON text, anything else as bytes.

        Send failures (e.g. an already-closed socket) are deliberately
        swallowed — delivery here is best-effort.
        """
        try:
            if isinstance(message, str):
                await websocket.send_text(message)
            elif isinstance(message, dict):
                await websocket.send_text(json.dumps(message, ensure_ascii=False))
            else:
                await websocket.send_bytes(message)
        except Exception:
            pass

    async def broadcast(self, message: str):
        """Send a text message to every active connection."""
        for connection in self.active_connections:
            await connection.send_text(message)

async def accept(websocket: WebSocket) -> threading.Timer:
    """Accept a WebSocket, send the welcome banner, and arm a 40s idle timer.

    The returned Timer closes the socket after 40 seconds unless the caller
    cancels it (presumably re-armed on client activity — confirm at call
    sites, which are outside this view).

    :param websocket: the incoming connection
    :return: the started threading.Timer so the caller can cancel/re-arm it
    """
    await websocket.accept()
    await send_message(wsWelcome, websocket)
    def close(websocket: WebSocket):
        # Runs on the timer's own thread, so the async close is driven via
        # asyncio.run(); any error (already-closed socket, loop issues) is
        # deliberately ignored — this is a best-effort cleanup.
        try:
            asyncio.run(websocket.close())
        except:
            pass
    timer = threading.Timer(40, close, kwargs={'websocket': websocket})
    timer.start()
    return timer

def getService(
    funasr=None
):
    global infer_mode_choices, ASRModel, TTSModel, verbose, app, ttsDefaultValue, tts_model_dir, denoise_model_dir
    
    sys.path.append(current_dir)
    sys.path.append(os.path.join(current_dir, "indextts"))

    ttsDefaultValue_ = {
        'audio_prompt':'男声-台湾',
        'seed':-1,
        'top_k':30,
        'top_p':0.8,
        'temperature':1,
        'repetition_penalty':10,
        'max_tokens':600,
        'sentences_bucket_max_size':8,
        'max_text_tokens_per_sentence':120,
        'infer_mode': '普通推理',
        'base64': 0,
        'streamType': '',
    }
    if ttsDefaultValue is not None and ttsDefaultValue != '':
        if (ttsDefaultValue.startswith('{') and ttsDefaultValue.endswith('}')) or (ttsDefaultValue.startswith('[') and ttsDefaultValue.endswith(']')):
            try:
                ttsDefaultValue=json.loads(ttsDefaultValue)
            except:
                ...
    if isinstance(ttsDefaultValue, dict):
        for key, value in ttsDefaultValue_.items():
            if key not in ttsDefaultValue:
                ttsDefaultValue[key]=value
    if tts_model_dir is None:
        tts_model_dir=""
    if denoise_model_dir is None:
        denoise_model_dir=""
    logLevel = os.environ['LOG_LEVEL']
    try:
        use_webui = int(os.environ['USE_WEBUI'])
    except:
        use_webui = 0
    try:
        gpu_memory_utilization = float(os.environ['GPU_MEMORY_UTILIZATION'])
    except:
        gpu_memory_utilization = 0.25
    try:
        dtype = str(os.environ['SERVICE_DTYPE']).lower()
    except:
        dtype = "auto"
    try:
        max_num_seqs = int(os.environ['MAX_NUM_SEQS'])
    except:
        max_num_seqs = 128
    try:
        module = os.environ['SERVICE_MODULE'].lower()
        module = module.split('|')
    except:
        module = ['tts', 'asr']
    grContainer = None
    pt_files = []
    multiple_sentence = []
    infer_mode_choices= ["普通推理", "并行推理", "流式推理"] if ttsDefaultValue['sentences_bucket_max_size'] > 1 else ["普通推理", "流式推理"]
    deNoise = DeNoise(model_dir=denoise_model_dir, logLevel=logLevel)
    cache_dir2 = os.path.join(f'{cache_dir}', 'deNoise')
    if os.path.exists(os.path.join(f'{cache_dir}', 'deNoise')):
        shutil.rmtree(cache_dir2)
    if 'tts' in module:
        import pandas as pd
        from plus.infer_vllm import IndexTTS
        # from tools.i18n.i18n import I18nAuto
        # i18n = I18nAuto(language="zh_CN")
        cfg_path = os.path.join(tts_model_dir, "config.yaml")
        TTSModel = IndexTTS(model_dir=tts_model_dir, cache_dir=cache_dir, cfg_path=cfg_path, gpu_memory_utilization=gpu_memory_utilization, dtype=dtype, max_num_seqs=max_num_seqs)
        # pt_files = []
        # example_cases = []
    
    def getSpeaker():
        global infer_mode_choices
        pt_files = []
        audio_files = []
        for root, dirs, files in os.walk("assets/speaker"):
            for file in files:
                if file.endswith(".pt"):
                    pt_files.append(os.path.join(root, file))
        if len(pt_files) > 0:
            pt_files.sort(key=lambda x: os.path.basename(x).lower())
        for root, dirs, files in os.walk("assets/speaker"):
            for file in files:
                if file.endswith(".mp3") or file.endswith(".wav"):
                    audio_files.append(os.path.join(root, file))
        if len(audio_files) > 0:
            audio_files.sort(key=lambda x: os.path.basename(x).lower())
        names=[]
        for file in pt_files:
            names.append(file)
        for file in audio_files:
            names.append(file)
        return names

    def getExample():
        global infer_mode_choices
        example_cases = []
        with open("assets/cases.jsonl", "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                example = json.loads(line)
                infer_mode = example.get("infer_mode")
                has = False
                for mode in infer_mode_choices:
                    if infer_mode == mode:
                        has = True
                        break
                if has == False:
                    infer_mode = '普通推理'
                example_cases.append([
                    example.get("text"),
                    infer_mode,
                ])
        return example_cases
    if 'asr' in module:
        from plus.funasr import FunASR
        asr_model_dir = os.environ['ASR_MODEL_DIR']
        vad_model_dir = os.environ['VAD_MODEL_DIR']
        punc_model_dir = os.environ['PUNC_MODEL_DIR']
        spk_model_dir = os.environ['SPK_MODEL_DIR']
        spkd_model_dir = os.environ['SPKD_MODEL_DIR']
        if spk_model_dir is None:
            spk_model_dir=""
        ASRModel = FunASR(
            funasr,
            model_dir=f"{asr_model_dir}",
            vad_model_dir=f"{vad_model_dir}",
            punc_model_dir=f"{punc_model_dir}",
            spk_model_dir=f"{spk_model_dir}",
            spkd_model_dir=f"{spkd_model_dir}",
            deNoiseFunc=deNoise.deNoiseFunc,
            logLevel=logLevel
        )

    if use_webui == 1:
        import gradio as gr
        jsScript = """
            setTimeout(() => {
                const btn = document.getElementById('clear_button');
                if (btn) {
                    btn.setAttribute('title', '清空所有说话人');
                }
            }, 500);
        """
        css = """
            :root {--cell-width-0:180px;--cell-width-1:auto;--cell-width-2:80px;}
            .padding0 { padding:0px;}
            .block_mpty span[data-testid='block-info'] {display:none;}
            .hbtn {height:60px;}
            .wav {position:relative;}
            .padding5 { padding:5px;}
            .padding10 { padding:10px;}
            .speaker, .speaker>div {display:flex;flex-direction:row;flex-wrap:nowrap; width:100%}
            .speaker_list>div{flex-direction:unset !important}
            .speaker_btn{min-width:40px !important; width:40px !important;flex-grow:unset !important;}
            .speaker_radio{min-height:210px;max-height:300px;overflow:auto;}
            .speaker_radio, .speaker_radio label {width:100% !important}
            .speaker_radio label span {white-space:nowrap;overflow:hidden;text-overflow:ellipsis;width:fit-content;}
        """
        with gr.Blocks(css=css) as grContainer:
            mutex = threading.Lock()
            gr.HTML('''<h2><center>DeepInside.AI 语音相关</center></h2>''')
            with gr.Tabs() as tabs:
                if 'tts' in module:
                    with gr.Tab("语音克隆预训练"):
                        with gr.Row():
                            with gr.Column():
                                with gr.Row():
                                    with gr.Column(scale=1.5,):
                                        with gr.Group():
                                            prompt_audio2 = gr.Dropdown(
                                                choices=[os.path.basename(path) for path in pt_files],
                                                value=None,
                                                label="选择已创建的音色",
                                                info="（*.pt 文件为音色配置文件带有音色种子与参数配置，其他为纯音频文件）"
                                            )
                                            add_button = gr.Button("加入对话", variant="primary", visible=False)
                                    with gr.Column(scale=1,):
                                        with gr.Group():
                                            with gr.Row():
                                                asr_de_noise=gr.Checkbox(label="开启后可以进行降噪", value=False, info="建议使用，但是会降低原有的音频的音量")
                                            with gr.Row():
                                                multiple_speaker=gr.Checkbox(label="开启多人配音模式", value=False, info="多人配音模式推理时不分句，在算力允许的前提下尽量使用并行推理，并且在WEBUI模式下不能多人编辑")
                                with gr.Row():
                                    with gr.Column(scale=4, ):
                                        prompt_audio = gr.Audio( 
                                            label="上传参考音频",
                                            sources=["upload","microphone"],
                                            type="filepath"
                                        )
                                with gr.Row():
                                    input_text_single = gr.TextArea(
                                        label="输入要转换的文本",
                                        placeholder="请输入想要转换成语音的文本内容...",
                                        lines=3
                                    )
                                with gr.Group(visible=False, elem_classes=["padding10", "speaker"]) as multiple:
                                    with gr.Column(scale=5, elem_classes="speaker_list"):
                                        speaker_container = gr.Radio(choices=multiple_sentence, label="已选说话人", elem_classes="speaker_radio")
                                    with gr.Column(scale=1, elem_classes="speaker_btn"):
                                        up_button = gr.Button("↑")
                                        down_button = gr.Button("↓")
                                        del_button = gr.Button("×")
                                        clear_button = gr.Button("清", elem_id="clear_button")
                                        edit_button = gr.Button("存", visible=False, variant="primary")
                                with gr.Row():
                                    with gr.Column(scale=1):
                                        infer_mode = gr.Radio(choices=infer_mode_choices, label="推理模式",info="并行推理：更适合长句，性能翻倍；流式推理：适合边推理边输出",value="普通推理")
                                    with gr.Group():
                                        seed = gr.Slider(
                                            label="Seed", minimum=-1, maximum=np.iinfo(np.int32).max, step=1, value=-1
                                        )
                                        randomize_seed = gr.Button("随机抽种子")
                                        def on_seed_click():
                                            newValue = random.randint(-1, np.iinfo(np.int32).max)
                                            return {
                                                seed: newValue
                                            }
                                        randomize_seed.click(
                                            on_seed_click,
                                            inputs=[],
                                            outputs=[seed]
                                        )
                                with gr.Group():
                                    with gr.Row():
                                        gr.Markdown("**分句设置** _参数会影响音频质量和生成速度_", elem_classes=['padding5'])
                                    with gr.Row():
                                        max_text_tokens_per_sentence = gr.Slider(
                                            label="分句最大Token数", value=ttsDefaultValue['max_text_tokens_per_sentence'], minimum=20, maximum=TTSModel.cfg.gpt.max_text_tokens, step=2, key="max_text_tokens_per_sentence",
                                            info="建议80~200之间，值越大，分句越长；值越小，分句越碎；过小过大都可能导致音频质量不高",
                                        )
                                        sentences_bucket_max_size = gr.Slider(
                                            label="分句分桶的最大容量（并行推理生效）", value=ttsDefaultValue['sentences_bucket_max_size'], minimum=1, maximum=16, step=1, key="sentences_bucket_max_size",
                                            info="建议2-8之间，值越大，一并行推理包含的分句数越多，过大可能导致内存溢出",
                                            visible=False
                                        )
                                with gr.Accordion("预览分句结果", open=True) as sentences_settings:
                                    sentences_preview = gr.Dataframe(
                                        headers=["序号", "分句内容", "Token数"],
                                        key="sentences_preview",
                                        wrap=True,
                                )
                            with gr.Column():
                                output_audio = gr.Audio(label="生成结果", type="filepath", visible=True, key="output_audio")
                                output_audio_stream = gr.Audio(label="生成结果 - 流式", streaming=True, visible=False, type="filepath", key="output_audio_stream")
                                with gr.Accordion("高级生成参数设置", open=False):
                                    with gr.Row():
                                        with gr.Column(scale=1):
                                            gr.Markdown("**GPT2 采样设置** _参数会影响音频多样性和生成速度详见[Generation strategies](https://hf-mirror.com/docs/transformers/main/en/generation_strategies)_")
                                            with gr.Row():
                                                temperature = gr.Slider(label="temperature", minimum=0.1, maximum=2.0, value=ttsDefaultValue['temperature'], step=0.1)
                                                top_p = gr.Slider(label="top_p 值越大语速越快", minimum=0.0, maximum=1.0, value=ttsDefaultValue['top_p'], step=0.01)
                                                top_k = gr.Slider(label="top_k 值越大语气变化越大", minimum=0, maximum=100, value=ttsDefaultValue['top_k'], step=1)
                                            with gr.Row():
                                                max_mel_tokens = gr.Slider(label="max_mel_tokens", value=ttsDefaultValue['max_tokens'], minimum=50, maximum=TTSModel.cfg.gpt.max_mel_tokens, step=10, info="生成Token最大数量，过小导致音频被截断")
                                                repetition_penalty = gr.Slider(label="repetition_penalty", precision=None, value=ttsDefaultValue['repetition_penalty'], minimum=0.1, maximum=20.0, step=0.1, info="用于控制文本生成时重复词或短语的惩罚系数参数")
                                            # with gr.Row():
                                            #     typical_sampling = gr.Checkbox(label="typical_sampling", value=False, info="不建议使用")
                                            #     typical_mass = gr.Slider(label="typical_mass", value=0.9, minimum=0.0, maximum=1.0, step=0.1)
                                    advanced_params = [
                                        top_p, top_k, temperature,
                                        repetition_penalty, max_mel_tokens,
                                        # typical_sampling, typical_mass,
                                    ]
                                with gr.Row():
                                    with gr.Column(min_width="100px", scale=1):
                                        gen_button = gr.Button("生成语音", variant="primary", elem_classes="hbtn",)
                                        gen_button_stream = gr.Button("生成语音", variant="primary", elem_classes="hbtn", visible=False)
                                    with gr.Column(min_width="100px", scale=2.5, visible = False) as saveAs:
                                        with gr.Row():
                                            with gr.Column(min_width="100px", scale=5, elem_classes="block_mpty",):
                                                pt_name = gr.Text(
                                                    label='',
                                                    placeholder="请填写所要保存的音色名称",
                                                )
                                            with gr.Column(min_width="100px", scale=1):
                                                pt_save_button = gr.Button("保存", variant="primary", elem_classes="hbtn",)
                                    with gr.Column(min_width="100px", scale=1, visible = False) as del_btn_form:
                                        del_btn = gr.Button("删除当前音色", elem_classes="hbtn")
                                with gr.Column(scale=2):
                                    gr.Markdown("---")
                                    exampleList = gr.Examples(
                                        label='示例文案',
                                        examples=getExample(),
                                        inputs=[
                                            input_text_single, infer_mode
                                        ],
                                        outputs=[
                                            output_audio, output_audio_stream, saveAs
                                        ],
                                    )
                        def multiple_speaker_change(x):
                            sentences=get_sentences()
                            return {
                                sentences_settings: gr.update(visible=not x),
                                multiple: gr.update(visible=x),
                                add_button: gr.update(visible=x),
                                infer_mode: gr.update(choices=infer_mode_choices, value="并行推理"),
                                speaker_container: gr.update(choices=sentences, value = 0 if len(sentences) > 0 else None),
                            }
                        multiple_speaker.change(
                            multiple_speaker_change,
                            inputs=[multiple_speaker],
                            outputs=[sentences_settings, multiple, add_button, infer_mode, speaker_container,],
                        )
                        def get_sentences():
                            nonlocal multiple_sentence
                            sentences=[]
                            for index in range(len(multiple_sentence)):
                                data = multiple_sentence[index]
                                txt = data[1]
                                sentence = f'{data[0]} --> {txt}'
                                sentences.append([sentence, index])
                            return sentences
                        def remove_sentence(speaker_container):
                            nonlocal multiple_sentence
                            newLists = []
                            for index in range(len(multiple_sentence)):
                                sentence = multiple_sentence[index]
                                if index != speaker_container:
                                    newLists.append(sentence)
                            multiple_sentence = newLists
                            n_index = int(speaker_container)
                            if len(multiple_sentence) == 0:
                                n_index = None
                            else:
                                if len(multiple_sentence) < n_index:
                                    n_index = len(multiple_sentence)
                            sentences=get_sentences()
                            return gr.update(choices=sentences, value = n_index)

                        del_button.click(
                            remove_sentence,
                            inputs=[speaker_container],
                            outputs=[speaker_container]
                        )
                        def clear_sentence():
                            nonlocal multiple_sentence
                            multiple_sentence = []
                            return gr.update(choices=[], value = None)
                        clear_button.click(
                            clear_sentence,
                            inputs=[],
                            outputs=[speaker_container]
                        )
                        def up_sentence(speaker_container):
                            nonlocal multiple_sentence
                            if speaker_container <= 0:
                                sentences=get_sentences()
                                return gr.update(choices=sentences, value = 0)
                            newLists = []
                            n_index = speaker_container - 1
                            for sentence in multiple_sentence:
                                newLists.append(sentence)
                            newLists[n_index] = multiple_sentence[speaker_container]
                            newLists[speaker_container] = multiple_sentence[n_index]
                            multiple_sentence = newLists
                            sentences=get_sentences()
                            return gr.update(choices=sentences, value = n_index)
                        up_button.click(
                            up_sentence,
                            inputs=[speaker_container],
                            outputs=[speaker_container]
                        )
                        def down_sentence(speaker_container):
                            nonlocal multiple_sentence
                            if speaker_container == len(multiple_sentence) - 1:
                                sentences=get_sentences()
                                return gr.update(choices=sentences, value = speaker_container)
                            newLists = []
                            for sentence in multiple_sentence:
                                newLists.append(sentence)
                            n_index = speaker_container + 1
                            newLists[n_index] = multiple_sentence[speaker_container]
                            newLists[speaker_container] = multiple_sentence[n_index]
                            multiple_sentence = newLists
                            sentences=get_sentences()
                            return gr.update(choices=sentences, value = n_index)
                        down_button.click(
                            down_sentence,
                            inputs=[speaker_container],
                            outputs=[speaker_container]
                        )
                        def save_speaker(prompt_audio, pt_name, seed, *args,):
                            """Persist the current prompt audio and sampling parameters as assets/speaker/<pt_name>.pt.

                            args: (top_p, top_k, temperature, repetition_penalty, max_mel_tokens).
                            Returns updates for [prompt_audio2, saveAs, pt_name] on success, or a dict
                            keeping the save-as form visible when no name was given.
                            """
                            if pt_name is None or pt_name == '':
                                gr.Warning("请填写需要保存的新音色名称")
                                pt_files = getSpeaker()
                                return {
                                    saveAs: gr.update(visible=True),
                                    prompt_audio2: gr.update(choices=[os.path.basename(path) for path in pt_files],)
                                }
                            filePath = os.path.join("assets/speaker", f'{pt_name}.pt')
                            if os.path.exists(filePath):
                                # Existing speaker with the same name is overwritten after a warning.
                                gr.Warning(f"{pt_name} 已存在该操作将会覆盖原有音色")
                                os.remove(filePath)
                            top_p, top_k, temperature, repetition_penalty, max_mel_tokens = args
                            sampling_rate = 24000  # rate used to load the conditioning mel (unused target_sr removed)
                            cond_mel = TTSModel.torchLoadAudio(prompt_audio, sampling_rate)
                            # Store the raw audio bytes so the prompt file can be reconstructed later.
                            with open(prompt_audio, 'rb') as file:
                                audio_bytes = file.read()
                            data = {
                                "file_name": prompt_audio,
                                "file_data": audio_bytes,
                                "seed": int(seed),
                                "top_p": float(top_p),
                                "top_k": int(top_k) if int(top_k) > 0 else None,
                                "temperature": float(temperature),
                                "repetition_penalty": float(repetition_penalty),
                                "max_tokens": int(max_mel_tokens),
                                "cond_mel": cond_mel
                            }
                            torch.save(data, filePath)
                            pt_files = getSpeaker()
                            return [
                                gr.update(choices=[os.path.basename(path) for path in pt_files], value=os.path.basename(filePath)),
                                gr.update(visible=False),
                                gr.update(value='')
                            ]

                        pt_save_button.click(
                            save_speaker,
                            inputs=[
                                prompt_audio, pt_name, seed, *advanced_params,
                            ],
                            outputs=[
                                prompt_audio2, saveAs, pt_name
                            ],
                        )
                        def add_prompt(prompt_audio, prompt_audio2, input_text_single, seed, *args,):
                            """Validate the form and append [speaker, text, prompt, seed, *sampling] to the dialogue list.

                            Returns an update selecting the newly appended row (or re-selecting the
                            current last row when validation failed and nothing was appended).
                            """
                            isOk = True
                            if not isinstance(prompt_audio2, str) or prompt_audio2 == "":
                                gr.Warning("请先选择音色")
                                isOk = False
                            if input_text_single == "":
                                gr.Warning("请输入要转换的文本")
                                isOk = False
                            if isOk:
                                multiple_sentence.append([prompt_audio2, input_text_single, prompt_audio, seed, *args])
                            sentences = get_sentences()
                            return gr.update(choices=sentences, value=len(sentences)-1)
                        add_button.click(
                            add_prompt,
                            inputs=[prompt_audio, prompt_audio2, input_text_single, seed, *advanced_params,],
                            outputs=[speaker_container]
                        )
                        def speaker_container_change(speaker_container, prompt_audio, prompt_audio2, input_text_single, seed, top_p, top_k, temperature, repetition_penalty, max_mel_tokens):
                            """Mirror the selected dialogue row's stored values into the edit form.

                            When no valid row is selected the caller-supplied values pass through
                            unchanged and the edit button is hidden.
                            """
                            speaker = None
                            try:
                                speaker = multiple_sentence[speaker_container]
                            except (IndexError, TypeError):
                                # Narrowed from a bare except: list indexing only raises
                                # IndexError (out of range) or TypeError (index is None).
                                pass
                            if speaker is not None:
                                prompt_audio2 = speaker[0]
                                input_text_single = speaker[1]
                                prompt_audio = speaker[2]
                                seed = speaker[3]
                                top_p = speaker[4]
                                top_k = speaker[5]
                                temperature = speaker[6]
                                repetition_penalty = speaker[7]
                                max_mel_tokens = speaker[8]
                            return [
                                gr.update(value=prompt_audio),
                                gr.update(value=prompt_audio2),
                                gr.update(value=input_text_single),
                                gr.update(value=seed),
                                gr.update(value=top_p),
                                gr.update(value=top_k),
                                gr.update(value=temperature),
                                gr.update(value=repetition_penalty),
                                gr.update(value=max_mel_tokens),
                                gr.update(visible=speaker is not None),
                            ]
                        speaker_container.change(
                            speaker_container_change,
                            inputs=[speaker_container, prompt_audio, prompt_audio2, input_text_single, seed, top_p, top_k, temperature, repetition_penalty, max_mel_tokens],
                            outputs=[prompt_audio, prompt_audio2, input_text_single, seed, *advanced_params, edit_button]
                        )
                        def edit_button_click(speaker_container, prompt_audio, prompt_audio2, input_text_single, seed, *args,):
                            """Overwrite the selected dialogue row with the current form values.

                            Falls back to add_prompt when no row is validly selected.
                            args: (top_p, top_k, temperature, repetition_penalty, max_mel_tokens).
                            """
                            top_p, top_k, temperature, repetition_penalty, max_mel_tokens = args
                            speaker = None
                            try:
                                speaker = multiple_sentence[speaker_container]
                            except (IndexError, TypeError):
                                # Narrowed from a bare except: only invalid/missing index is expected here.
                                pass
                            if speaker is None:
                                return add_prompt(prompt_audio, prompt_audio2, input_text_single, seed, *args,)
                            multiple_sentence[speaker_container] = [
                                prompt_audio2,
                                input_text_single,
                                prompt_audio,
                                seed,
                                top_p,
                                top_k,
                                temperature,
                                repetition_penalty,
                                max_mel_tokens
                            ]
                            sentences = get_sentences()
                            return gr.update(choices=sentences, value=speaker_container)

                        edit_button.click(
                            edit_button_click,
                            inputs=[speaker_container, prompt_audio, prompt_audio2, input_text_single, seed, *advanced_params,],
                            outputs=[speaker_container]
                        )
                        def del_speaker(prompt_audio2):
                            """Remove the selected speaker file from assets/speaker and refresh the dropdown."""
                            if not isinstance(prompt_audio2, str) or prompt_audio2 == "":
                                gr.Warning("请先选择音色")
                                return [
                                    gr.update(value=None),
                                    gr.update(visible=False)
                                ]
                            target = os.path.join("assets/speaker", prompt_audio2)
                            if os.path.exists(target):
                                os.remove(target)
                            names = [os.path.basename(path) for path in getSpeaker()]
                            return [
                                gr.update(choices=names),
                                gr.update(visible=False)
                            ]
                        del_btn.click(
                            del_speaker,
                            inputs=[
                                prompt_audio2,
                            ],
                            outputs=[
                                prompt_audio2, del_btn_form
                            ],
                        )
                        async def gen_single_stream(
                            multiple_speaker, prompts, text, infer_mode, seed,
                            max_text_tokens_per_sentence=120,
                            sentences_bucket_max_size=4,
                            *args, progress=gr.Progress()
                        ):
                            """Stream synthesized audio chunks for single-speaker TTS.

                            Multi-speaker mode is not supported while streaming: it only emits a
                            warning and the generator yields nothing.
                            args: (top_p, top_k, temperature, repetition_penalty, max_mel_tokens).
                            """
                            isPass = True
                            if multiple_speaker == True:
                                gr.Warning("多人对话模式不支持WEBUI模式下的流式输出！如需快速推理请使用 并行推理")
                                isPass = False
                            else:
                                # Both checks run so the user sees every missing field at once.
                                if prompts == '' or prompts is None:
                                    gr.Warning("请选择需要参考的音频")
                                    isPass = False
                                if text == '':
                                    gr.Warning("请填写需要合成音频的文本")
                                    isPass = False
                            if not isPass:
                                return
                            TTSModel.gr_progress = progress
                            top_p, top_k, temperature, repetition_penalty, max_mel_tokens = args
                            kwargs = {
                                "top_p": float(top_p),
                                "top_k": int(top_k) if int(top_k) > 0 else None,
                                "temperature": float(temperature),
                                "repetition_penalty": float(repetition_penalty),
                                "max_tokens": int(max_mel_tokens),
                            }
                            if isinstance(prompts, list):
                                prompt_paths = [prompt for prompt in prompts if prompt is not None]
                            else:
                                prompt_paths = [prompts] if prompts is not None else []
                            # NOTE: the old `if multiple_speaker:` branch here was unreachable —
                            # multi-speaker requests never pass validation above — and was removed.
                            async for chunk in TTSModel.infer_stream(
                                prompt_paths,
                                text,
                                infer_mode,
                                verbose=verbose,
                                seed=seed,
                                max_text_tokens_per_sentence=max_text_tokens_per_sentence,
                                sentences_bucket_max_size=sentences_bucket_max_size,
                                sampling_params=kwargs
                            ):
                                yield chunk

                        async def gen_single(
                            multiple_speaker, prompts, text, infer_mode, seed,
                            max_text_tokens_per_sentence=120,
                            sentences_bucket_max_size=4,
                            *args, progress=gr.Progress()
                        ):
                            """Run non-streaming TTS inference; returns updates for [output_audio, saveAs].

                            Supports single-speaker (prompts + text) and multi-speaker mode
                            (the accumulated `multiple_sentence` rows).
                            args: (top_p, top_k, temperature, repetition_penalty, max_mel_tokens).
                            """
                            def _invalid():
                                # Shared validation-failure response: clear the player, hide save-as.
                                return {
                                    output_audio: gr.update(value=None, visible=True),
                                    saveAs: gr.update(visible=False)
                                }
                            if multiple_speaker == True:
                                if len(multiple_sentence) == 0:
                                    gr.Warning("请选择说话人音色")
                                    return _invalid()
                            else:
                                if prompts == '' or prompts is None:
                                    gr.Warning("请选择需要参考的音频")
                                    return _invalid()
                                if text == '':
                                    gr.Warning("请填写需要合成音频的文本")
                                    return _invalid()
                            TTSModel.gr_progress = progress
                            top_p, top_k, temperature, repetition_penalty, max_mel_tokens = args
                            kwargs = {
                                "top_p": float(top_p),
                                "top_k": int(top_k) if int(top_k) > 0 else None,
                                "temperature": float(temperature),
                                "repetition_penalty": float(repetition_penalty),
                                "max_tokens": int(max_mel_tokens),
                            }
                            if isinstance(prompts, list):
                                prompt_paths = [prompt for prompt in prompts if prompt is not None]
                            else:
                                prompt_paths = [prompts] if prompts is not None else []
                            # NOTE: an "outputs/spk_<timestamp>.wav" path used to be built earlier and
                            # then unconditionally discarded by `output_path = None`; the dead code was
                            # removed and the effective value (None) kept.
                            output_path = None
                            if multiple_speaker == True:
                                prompt_paths = multiple_sentence
                                kwargs = None
                                text = ""
                            output = await TTSModel.infer(
                                multiple_speaker,
                                prompt_paths,
                                text,
                                infer_mode,
                                output_path,
                                verbose=verbose,
                                seed=seed,
                                max_text_tokens_per_sentence=max_text_tokens_per_sentence,
                                sentences_bucket_max_size=sentences_bucket_max_size,
                                sampling_params=kwargs
                            )
                            return {
                                output_audio: gr.update(value=output,visible=True),
                                saveAs: gr.update(visible=not multiple_speaker)
                            }

                        def update_prompt_audio(asr_de_noise):
                            """After uploading new prompt audio: re-enable generation and clear speaker-selection state."""
                            hidden = dict(visible=False)
                            return {
                                gen_button: gr.update(interactive=True),
                                gen_button_stream: gr.update(interactive=True),
                                saveAs: gr.update(**hidden),
                                prompt_audio2: gr.update(value=None),
                                del_btn_form: gr.update(**hidden),
                            }
                        def on_input_text_change(text, max_tokens_per_sentence):
                            """Preview how the input text splits into sentences with per-sentence token counts."""
                            if not text or len(text) == 0:
                                # Empty input: show an empty preview table.
                                empty = pd.DataFrame([], columns=["序号", "分句内容", "Token数"])
                                return {
                                    sentences_preview: gr.update(value=empty),
                                    saveAs: gr.update(visible=False),
                                }
                            tokens = TTSModel.tokenizer.tokenize(text)
                            pieces = TTSModel.tokenizer.split_sentences(tokens, max_tokens_per_sentence=int(max_tokens_per_sentence))
                            rows = [[idx, ''.join(piece), len(piece)] for idx, piece in enumerate(pieces)]
                            return {
                                sentences_preview: gr.update(value=rows, visible=True, type="array"),
                                saveAs: gr.update(visible=False),
                            }
                        
                        def on_prompt_audio2_change(value, asr_de_noise, seed, top_p, top_k, temperature, repetition_penalty, max_mel_tokens):
                            """Load a saved speaker (.pt bundle or raw audio) into the form, optionally de-noising it.

                            Returns nine updates matching the registered outputs:
                            [prompt_audio, seed, top_p, top_k, temperature, repetition_penalty,
                             max_mel_tokens, del_btn_form, pt_name].
                            """
                            if value is None or value == '':
                                # BUG FIX: this handler is wired to nine outputs, so the early return
                                # must produce nine updates (previously a single gr.update, which
                                # Gradio rejects as a wrong output count). Non-audio fields unchanged.
                                return [gr.update(value=None)] + [gr.update() for _ in range(8)]
                            file = os.path.join("assets/speaker", value)
                            if file.endswith('.pt'):
                                # Saved speaker bundle: restore the audio path and sampling parameters.
                                data = TTSModel.readPtFile(file)
                                file = data['file']
                                seed = data['seed']
                                top_p = data['top_p']
                                top_k = data['top_k']
                                temperature = data['temperature']
                                repetition_penalty = data['repetition_penalty']
                                max_mel_tokens = data['max_tokens']
                            if asr_de_noise:
                                # De-noised copies are cached under <cache_dir>/deNoise keyed by basename.
                                cache_dir2 = os.path.join(f'{cache_dir}', 'deNoise')
                                if not os.path.exists(cache_dir2):
                                    os.makedirs(cache_dir2)
                                output = os.path.join(cache_dir2, os.path.basename(file))
                                if not os.path.exists(output):
                                    deNoise.deNoiseFunc(file, output)
                                file = output
                            ptName = value
                            if ptName.find(".") > -1:
                                ptName = ptName.split(".")[0]
                            return [
                                gr.update(value=file),
                                gr.update(value=seed),
                                gr.update(value=top_p),
                                gr.update(value=top_k),
                                gr.update(value=temperature),
                                gr.update(value=repetition_penalty),
                                gr.update(value=max_mel_tokens),
                                gr.update(visible=True),
                                gr.update(value=ptName)
                            ]

                        def infer_mode_change(value):
                            """Toggle which output player / generate button is visible for the chosen inference mode."""
                            streaming = value == '流式推理'
                            ordinary = value == '普通推理'
                            show_bucket = False
                            if not streaming and not ordinary:
                                # Parallel inference: bucket size control only shown when the
                                # configured default allows batching more than one sentence.
                                show_bucket = ttsDefaultValue['sentences_bucket_max_size'] > 1
                            return {
                                output_audio: gr.update(visible=not streaming),
                                output_audio_stream: gr.update(visible=streaming),
                                gen_button: gr.update(visible=not streaming),
                                gen_button_stream: gr.update(visible=streaming),
                                sentences_bucket_max_size: gr.update(visible=show_bucket),
                            }

                        # Re-compute the sentence-split preview whenever the text or split size changes.
                        input_text_single.change(
                            on_input_text_change,
                            inputs=[input_text_single, max_text_tokens_per_sentence],
                            outputs=[sentences_preview, saveAs,]
                        )
                        max_text_tokens_per_sentence.change(
                            on_input_text_change,
                            inputs=[input_text_single, max_text_tokens_per_sentence],
                            outputs=[sentences_preview, saveAs,]
                        )
                        # Selecting a saved speaker loads its audio and sampling params into the form.
                        prompt_audio2.change(
                            on_prompt_audio2_change,
                            inputs=[prompt_audio2, asr_de_noise, seed, top_p, top_k, temperature, repetition_penalty, max_mel_tokens],
                            outputs=[prompt_audio, seed, top_p, top_k, temperature, repetition_penalty, max_mel_tokens, del_btn_form, pt_name]
                        )
                        # Uploading fresh prompt audio re-enables generation and clears the saved-speaker selection.
                        prompt_audio.upload(
                            update_prompt_audio,
                            inputs=[asr_de_noise],
                            outputs=[gen_button, gen_button_stream, saveAs, prompt_audio2, del_btn_form]
                        )
                        # Switching inference mode swaps the visible player/button pair.
                        infer_mode.change(
                            infer_mode_change,
                            inputs=[infer_mode],
                            outputs=[gen_button, gen_button_stream, output_audio, output_audio_stream, sentences_bucket_max_size]
                        )
                        # Configure concurrency for the generation event
                        gen_button.click(
                            gen_single,
                            inputs=[
                                multiple_speaker, prompt_audio, input_text_single, infer_mode, seed,
                                max_text_tokens_per_sentence, sentences_bucket_max_size,
                                *advanced_params,
                            ],
                            outputs=[output_audio, saveAs],
                            concurrency_limit=16  # Allow up to 16 concurrent generations
                        )
                        gen_button_stream.click(
                            gen_single_stream,
                            inputs=[
                                multiple_speaker, prompt_audio, input_text_single, infer_mode, seed,
                                max_text_tokens_per_sentence, sentences_bucket_max_size,
                                *advanced_params,
                            ],
                            outputs=[output_audio_stream],
                            concurrency_limit=16  # Allow up to 16 concurrent generations
                        )
                    def initialize():
                        # Runs once on page load: populate the saved-speaker dropdown.
                        pt_files=getSpeaker()
                        example_cases=getExample()  # NOTE(review): result unused; kept in case getExample() has side effects — confirm
                        return gr.update(choices=[os.path.basename(path) for path in pt_files],)
                        # return [
                        #     gr.update(choices=[os.path.basename(path) for path in pt_files],),
                        #     gr.update(examples=example_cases)
                        # ]
                    grContainer.load(initialize, outputs=[prompt_audio2])
                if 'asr' in module:
                    # Speech-recognition tab: audio input + options on the left, result box on the right.
                    with gr.Tab("语音识别",):
                        with gr.Row():
                            with gr.Column(scale=2):
                                asr_audio = gr.Audio( 
                                    label="上传需要识别的音频",
                                    sources=["upload","microphone"],
                                    type="filepath"
                                )
                                # asr_hotword = gr.Textbox(label="多音字辅助设置：以空格分隔多个热词")
                                with gr.Row():
                                    with gr.Column(scale=3):
                                        with gr.Group():
                                            with gr.Row():
                                                asr_de_noise=gr.Checkbox(label="开启后可以进行降噪", value=False, info="建议使用，但是会降低原有的音频的音量")
                                                asr_use_emotion=gr.Checkbox(label="解析语境情绪", value=False, info="")
                                            # Diarization toggle only shown when a diarization model dir is configured.
                                            with gr.Row(visible=True if spkd_model_dir != '' and spkd_model_dir is not None else False):
                                                asr_use_speaker_diarization=gr.Checkbox(label="说话人分离", value=False, info="将语音中的说话人分离出来（通过时间戳表达），开启后会占用更多算力")
                                    with gr.Column(scale=1):
                                        asr_btn=gr.Button("开始识别", variant="primary", elem_classes="hbtn",)
                                # with gr.Row():
                                #     with gr.Accordion("多说话人识别", open=False):
                                #         with gr.Row():
                                #             with gr.Column(min_width="100px", scale=5, elem_classes="block_mpty",):
                                #                 pt_name = gr.Text(
                                #                     label='',
                                #                     placeholder="请填写所要保存的音色名称",
                                #                 )
                                #             with gr.Column(min_width="100px", scale=1):
                                #                 pt_save_button = gr.Button("保存", variant="primary", elem_classes="hbtn",)
                            with gr.Column(scale=1):
                                asr_label = gr.Textbox(label="识别结果", value="", scale=1)
                                
                        def on_asr_btn(asr_audio,asr_de_noise,asr_use_emotion,asr_use_speaker_diarization):
                            """Run ASR on the uploaded audio and place the recognized text in the result box."""
                            if asr_audio is None or asr_audio == '':
                                # Nothing uploaded: warn and clear the result box.
                                gr.Warning("请选择需要识别的音频文件")
                                return {asr_label: gr.update(value=None)}
                            recognition = ASRModel.speech(
                                input=asr_audio,
                                batch_size_s=300,
                                deNoise=asr_de_noise,
                                useEmo=asr_use_emotion,
                                useSpeakerDiarization=asr_use_speaker_diarization
                            )
                            return {asr_label: gr.update(value=recognition['asr'])}

                        asr_btn.click(
                            on_asr_btn,
                            inputs=[asr_audio,asr_de_noise,asr_use_emotion,asr_use_speaker_diarization],
                            outputs=[asr_label]
                        )
                        # asr_use_emotion.change(
                        #     on_asr_use_emotion,
                        #     inputs=[asr_use_emotion],
                        #     outputs=[asr_label]
                        # )
    
    if 'asr' in module:
        @app.api_route('/api/asr', methods=['POST', 'GET'])
        async def identify_api(request: Request):
            """HTTP ASR endpoint: accepts an uploaded 'voice' file plus optional flags
            (deNoise, useEmo, useSpeakerDiarization, hotword, taskId) and returns the
            recognition result wrapped in the standard response envelope."""
            global tasks, ASRModel, cache_dir, cache_dir2, cache_file, cacheBasePath
            clearCache(f"{cacheBasePath}/cache", "asr")
            requests = await getRequestItems(request, uploadPath='/cache/asr', upload2Base64=False)
            result = dict()
            startedAt = time.time()  # renamed from `timeStamp` which shadowed the imported helper
            if 'taskId' not in requests:
                taskId = getTaskId()
            else:
                taskId = requests['taskId']
            tasks['asr'][taskId] = {}
            tasks['asr'][taskId]['hotword'] = ''
            if 'voice' not in requests:
                # Clean up the just-registered task entry before the early return
                # (previously leaked on this path).
                tasks['asr'].pop(taskId, None)
                result = dict(
                    code=500,
                    status='fail',
                    data='請上傳需要識別的音頻'
                )
                return response(code=result['code'], result=result, useTime=float(f'{time.time()-startedAt:.3f}'),)
            hotword = requestGet(requests, 'hotword')  # NOTE(review): read but currently unused — confirm intended
            asr_de_noise = int(requestGet(requests, 'deNoise', 0))
            asr_use_emotion = int(requestGet(requests, 'useEmo', 0))
            # BUG FIX: this previously referenced `asr_use_speaker_diarization`, the Gradio
            # checkbox component from the webui scope (NameError / component object here);
            # the flag must come from the request like the other options.
            asr_use_speaker_diarization = int(requestGet(requests, 'useSpeakerDiarization', 0))
            voicePath = f"{cacheBasePath}{requestGet(requests, 'voice')}"  # renamed from `input` (shadowed builtin)
            try:
                asrResult = ASRModel.speech(
                    input=voicePath,
                    batch_size_s=300,
                    deNoise=asr_de_noise,
                    useEmo=asr_use_emotion,
                    useSpeakerDiarization=asr_use_speaker_diarization
                )
                if asrResult['code'] != 200:
                    result = asrResult
                else:
                    if isinstance(asrResult['asr'], str):
                        asrResult['asr'] = json.loads(asrResult['asr'])
                    result = dict(
                        code=200,
                        status='success',
                        data=asrResult['asr']
                    )
            finally:
                # Always remove the uploaded file and the task registry entry,
                # even when recognition raises.
                if os.path.exists(voicePath):
                    os.remove(voicePath)
                tasks['asr'].pop(taskId, None)
            return response(code=result['code'], result=result, useTime=float(f'{time.time()-startedAt:.3f}'),)
        
        @app.websocket("/ws/asr")
        async def wsIdentify(websocket: WebSocket):
            # WebSocket ASR endpoint. Each incoming frame is one of:
            #   * raw audio bytes            -> saved to cache and recognised
            #   * text containing ';base64,' -> decoded to an audio file and recognised
            #   * any other text             -> stored as the hotword hint for this task
            global tasks, ASRModel, cache_dir, cache_dir2, cache_file, cacheBasePath
            # All query parameters as a plain dict
            requests = dict(websocket.query_params)
            result = dict()
            timeStamp = time.time()
            serviceType = 'ASR_WS'
            asr_de_noise = False
            asr_use_emotion = False
            if 'taskId' not in requests:
                taskId = getTaskId()
            else:
                taskId = requests['taskId']
            tasks['asr'][taskId] = {}
            tasks['asr'][taskId]['hotword'] = ''

            timer = await accept(websocket=websocket)
            try:
                while True:
                    if websocket.client_state == WebSocketState.DISCONNECTED:
                        break
                    data = await websocket.receive()
                    Bytes = None
                    if 'bytes' in data or 'text' in data:
                        timer.cancel()
                        clearCache(f"{cacheBasePath}/cache", "asr")
                        if 'bytes' in data:
                            # Raw audio frame: persist under today's cache folder.
                            Bytes = data['bytes']
                            filePath = f"{cacheBasePath}/cache/asr/{datetime.now().strftime('%Y-%m-%d')}"
                            os.makedirs(filePath, exist_ok=True)
                            filePath = f"{filePath}/{taskId}.mp3"
                            with open(filePath, "wb") as f:
                                f.write(Bytes)
                            Bytes = filePath
                        elif 'text' in data:
                            text = data['text']
                            if text.find(';base64,') > -1:
                                # Base64 audio payload: decode to a file in the day cache.
                                text = f'data:audio/mp3;base64,{text}'
                                Bytes = b64Decode(text, os.path.join(cacheBasePath, f"cache/asr/{datetime.now().strftime('%Y-%m-%d')}"))
                            else:
                                # Plain text is treated as a hotword hint.
                                tasks['asr'][taskId]['hotword'] = text
                        if Bytes is not None and os.path.exists(Bytes):
                            asrResult = ASRModel.speech(
                                input=Bytes,
                                batch_size_s=300,
                                deNoise=asr_de_noise,
                                useEmo=asr_use_emotion,
                                useSpeakerDiarization=asr_use_speaker_diarization
                            )
                            if asrResult['code'] != 200:
                                result = asrResult
                            else:
                                if isinstance(asrResult['asr'], str):
                                    asrResult['asr'] = json.loads(asrResult['asr'])
                                result = dict(
                                    code=200,
                                    status='success',
                                    data=asrResult['asr']
                                )
                            await send_message(dict(
                                code = 200,
                                data = result,
                                useTime = float(f'{time.time()-timeStamp:.3f}'),
                                taskId = taskId
                            ), websocket)
                            os.remove(Bytes)
                            # BUGFIX: re-initialise instead of `del` so a later hotword
                            # frame on this connection cannot KeyError on tasks['asr'][taskId].
                            tasks['asr'][taskId] = {'hotword': tasks['asr'][taskId].get('hotword', '')}
                    # BUGFIX: yield to the event loop; time.sleep() here blocked it.
                    await asyncio.sleep(0.1)
            except WebSocketDisconnect:
                pass
            except Exception as ex:
                ex = str(ex)
                if ex.find('a disconnect') > -1:
                    pass
                elif ex.find('Need to call "accept" first') == -1:
                    catchError('wsIndentify')
            finally:
                # Always drop the per-connection task entry on exit (it used to leak
                # when the client disconnected before any recognition completed).
                tasks['asr'].pop(taskId, None)

    if 'tts' in module:
        def getTTSParamsData(text):
            """Normalise a multi-speaker TTS request into per-speaker prompt tuples.

            ``text`` is expected to be a list of dicts, each with at least
            ``text`` and ``prompts`` keys. For each entry the speaker ``.pt``
            file is resolved (bare names map to ``assets/speaker/<name>.pt``)
            and sampling parameters are taken from the request, the speaker
            file, or the service defaults, in that order, then clamped.

            Returns a list of
            ``['', text, prompts, seed, top_p, top_k, temperature,
               repetition_penalty, max_tokens]`` rows; empty list for
            non-list input.

            NOTE(review): on validation failure this returns an HTTP response
            object instead of a list — callers should check the return type.
            """
            global TTSModel, ttsDefaultValue
            # BUGFIX: the error paths referenced an undefined `timeStamp`
            # (NameError); time the call locally instead.
            startTime = time.time()
            audio_prompt = []
            if isinstance(text, list):
                for index in range(len(text)):
                    data = text[index]
                    noText = False
                    noPrompts = False
                    if 'prompts' in data:
                        # Bare speaker names map to assets/speaker/<name>.pt
                        if data['prompts'].find('.') == -1:
                            data['prompts'] = f'{data["prompts"]}.pt'
                        if data['prompts'].find('/') == -1:
                            data['prompts'] = os.path.join("assets/speaker", data['prompts'])
                        if os.path.exists(data['prompts']) == False:
                            noPrompts = True
                    else:
                        noPrompts = True
                    if 'text' not in data:
                        noText = True
                    if noText:
                        result = dict(
                            code = 500,
                            status = 'fail',
                            msg = '請上傳需要合成的文本信息'
                        )
                        return response(code=result['code'], result=result, useTime=float(f'{time.time()-startTime:.3f}'),)
                    if noPrompts:
                        result = dict(
                            code = 500,
                            status = 'fail',
                            msg = '請上傳參考音頻'
                        )
                        return response(code=result['code'], result=result, useTime=float(f'{time.time()-startTime:.3f}'),)
                    if data['prompts'].endswith('.pt'):
                        speaker = TTSModel.readPtFile(data['prompts'])
                        keys = ['seed', 'top_p', 'top_k', 'temperature', 'repetition_penalty', 'max_tokens']
                        defaultValues = dict(
                            seed=int(ttsDefaultValue['seed']),
                            top_p=float(ttsDefaultValue['top_p']),
                            top_k=int(ttsDefaultValue['top_k']),
                            temperature=float(ttsDefaultValue['temperature']),
                            # BUGFIX: repetition_penalty is a float (clamped 0.1-20.0
                            # below, float elsewhere in this file); int() truncated it.
                            repetition_penalty=float(ttsDefaultValue['repetition_penalty']),
                            max_tokens=int(ttsDefaultValue['max_tokens'])
                        )
                        # Priority: request value -> speaker file value -> default.
                        tmp = {}
                        for key in keys:
                            if key in data:
                                tmp[key] = data[key]
                            else:
                                if key in speaker:
                                    tmp[key] = speaker[key]
                                else:
                                    tmp[key] = defaultValues[key]
                        tmp['text'] = data['text']
                        tmp['prompts'] = data['prompts']
                        audio_prompt.append([
                            '',
                            f"{' ' if index > 0 else ''}{tmp['text']}",
                            tmp['prompts'],
                            int(getMaxMin(tmp['seed'], ttsDefaultValue['seed'], -1, np.iinfo(np.int32).max, int)),
                            float(getMaxMin(tmp['top_p'], ttsDefaultValue['top_p'], 0, 1, float)),
                            int(getMaxMin(tmp['top_k'], ttsDefaultValue['top_k'], 0, 100, int)),
                            float(getMaxMin(tmp['temperature'], ttsDefaultValue['temperature'], 0.1, 2, float)),
                            # BUGFIX: was wrapped in int(), defeating the float clamp.
                            float(getMaxMin(tmp['repetition_penalty'], ttsDefaultValue['repetition_penalty'], 0.1, 20, float)),
                            int(getMaxMin(tmp['max_tokens'], ttsDefaultValue['max_tokens'], 50, 900, int)),
                        ])
            return audio_prompt
        
        async def doTTSfunc(params, isbase64, isverbose):
            """Run a complete (non-streaming) TTS inference.

            The output file name is the MD5 of the parameters, so identical
            requests reuse the cached MP3 on disk; base64 responses are also
            memoised in ``tasks['tts']``.

            Returns a result dict ``{code, status, data}`` where ``data`` is a
            cache-relative file path, or a ``data:audio/...`` base64 URI when
            ``isbase64`` is true.
            """
            global TTSModel, tasks, cacheBasePath
            fileName = toMd5(json.dumps(params, ensure_ascii=False))
            taskCode = f'{fileName}_{"base64" if isbase64 == True else ""}'
            clearCache(f"{cacheBasePath}/cache", "tts")
            output_path = f"{cacheBasePath}/cache/tts/{datetime.now().strftime('%Y-%m-%d')}/{fileName}"
            output_path = f"{output_path}.mp3"
            result = dict(
                code = 400,
                status = 'fail',
                data = None
            )
            if taskCode in tasks['tts'] and isbase64 == True:
                # Memoised base64 payload from an identical earlier request.
                result = tasks['tts'][taskCode]
            else:
                if os.path.exists(output_path):
                    # File-level cache hit: skip inference entirely.
                    output = output_path
                else:
                    options = dict(
                        **params,
                        verbose=isverbose,
                        output_path=output_path
                    )
                    output = await TTSModel.infer(**options)
                if isbase64 == True:
                    # BUGFIX: non-blocking grace wait for the file to be flushed;
                    # time.sleep() here stalled the whole event loop.
                    await asyncio.sleep(0.5)
                    with open(output, 'rb') as file:
                        # Read the audio bytes and wrap them in a data URI.
                        file_content = file.read()
                        base64_encoded = base64.b64encode(file_content).decode('utf-8')
                        if output.endswith('.mp3'):
                            base64_encoded = f"data:audio/mpeg;base64,{base64_encoded}"
                        elif output.endswith('.wav'):
                            base64_encoded = f"data:audio/wav;base64,{base64_encoded}"
                        else:
                            base64_encoded = f"data:application/octet-stream;base64,{base64_encoded}"
                        output = base64_encoded
                try:
                    # Expose a cache-relative path to the client.
                    output = output.replace(cacheBasePath, '')
                except AttributeError:
                    # Non-string output from infer(); log and return it as-is.
                    print('output --> ', output)
                result = dict(
                    code = 200,
                    status = 'success',
                    data = output
                )
                if isbase64 == True:
                    # BUGFIX: store under the key the lookup above actually reads.
                    # The cache used to be written only for non-base64 requests,
                    # which the (base64-only) lookup never consulted — a dead cache.
                    tasks['tts'][taskCode] = result
            return result
        async def doTTSfunc_stream(params, fileName, output_path, isbase64, isverbose, stream_type='stream'):
            """Yield TTS inference output chunk by chunk.

            For ``stream_type == 'stream'`` each yielded chunk is a file path
            (or, when ``isbase64`` is true, an SSE ``data:`` line carrying a
            base64 audio URI). For any other stream type chunks are
            ``'<m3u8 path>\\n\\n<segment path>'`` pairs consumed by the HLS
            playlist writer.

            NOTE(review): base64 chunks are memoised directly in the top-level
            ``tasks`` dict keyed by the cache-relative path — confirm this
            cannot collide with the 'asr'/'tts' namespace keys.
            """
            global TTSModel, tasks, cacheBasePath
            if stream_type == 'stream':
                output_path = f"{output_path}.mp3"
            else:
                output_path = f"{output_path}.m3u8"
            result = dict(
                code = 400,
                status = 'fail',
                data = None
            )
            options = dict(
                **params,
                output_path=output_path,
                sessionid=toMd5(f'{output_path}/{fileName}'),
                verbose=isverbose
            )
            # infer_stream() does not accept this key; strip it before the call.
            if 'multiple_speaker' in options:
                del options['multiple_speaker']
            async for chunk in TTSModel.infer_stream(**options):
                if isinstance(chunk, str):
                    chunk_ = chunk          # absolute path, used for file I/O
                    chunk = chunk.replace(cacheBasePath, '')  # client-facing relative path
                    if stream_type=="stream":
                        if isbase64 == True:
                            if chunk not in tasks:
                                base64_encoded = None
                                with open(chunk_, 'rb') as file:
                                    # read the audio bytes
                                    file_content = file.read()
                                    # encode the bytes as a Base64 data URI
                                    base64_encoded = base64.b64encode(file_content).decode('utf-8')
                                    if chunk_.endswith('.mp3'):
                                        base64_encoded = f"data:audio/mpeg;base64,{base64_encoded}"
                                    elif chunk_.endswith('.wav'):
                                        base64_encoded = f"data:audio/wav;base64,{base64_encoded}"
                                    else:
                                        base64_encoded = f"data:application/octet-stream;base64,{base64_encoded}"
                                tasks[chunk] = base64_encoded
                            chunk = f'data: {tasks[chunk]}\n\n'
                    else:
                        chunk = f'{output_path}\n\n{chunk_}'
                yield chunk

        def getKeyValue(data, key, splitStr = '\n'):
            """Return the first item containing *key*, with *key* stripped out.

            ``data`` may be a string (split on ``splitStr``) or a list; items
            that are not strings are skipped. Returns ``None`` when nothing
            matches or when ``data`` is neither a string nor a list.
            """
            if isinstance(data, str):
                candidates = data.split(splitStr)
            elif isinstance(data, list):
                candidates = data
            else:
                candidates = []
            hits = (
                item.replace(key, '')
                for item in candidates
                if isinstance(item, str) and key in item
            )
            return next(hits, None)
        async def doTTSfunc_stream_make_m3u8(*args, **kwargs):
            """Consume the TTS stream and incrementally write an HLS playlist.

            Each chunk from :func:`doTTSfunc_stream` is ``'<m3u8>\\n\\n<aac>'``;
            the playlist file is rewritten after every segment so clients can
            start playback while synthesis is still running, then closed with
            ``#EXT-X-ENDLIST``.
            """
            index = -1
            max_duration = 5
            m3u8 = None
            # '{max_duration}' is a placeholder substituted on every rewrite,
            # once the real longest segment duration is known.
            playlist = ["#EXTM3U", "#EXT-X-PLAYLIST-TYPE:EVENT", "#EXT-X-TARGETDURATION:{max_duration}", "#EXT-X-VERSION:4", "#EXT-X-MEDIA-SEQUENCE:0"]
            async for chunk in doTTSfunc_stream(*args, **kwargs):
                chunks = chunk.split('\n\n')
                m3u8 = chunks[0]
                aac = chunks[1]
                aacUri = os.path.basename(aac)
                index += 1
                if os.path.exists(aac):
                    audio = AAC(aac)
                    # Target duration must cover the longest segment (+1s headroom).
                    max_duration = max(float(max_duration), audio.info.length + 1)
                    max_duration = float(f'{max_duration:.3f}')
                    playlist.append(f'#EXTINF:{audio.info.length:.3f},')
                    playlist.append(aacUri)
                    playlistStr = "\n".join(playlist).replace('{max_duration}', str(max_duration))
                    with open(m3u8, 'w', encoding='utf-8') as f:
                        f.write(playlistStr)
            if m3u8 is None:
                # BUGFIX: the stream yielded nothing — `m3u8` was previously
                # unbound here and the final write raised NameError.
                return
            if index >= 0:
                # BUGFIX: was `index > 0`, which left a single-segment playlist
                # without its #EXT-X-ENDLIST terminator.
                playlist.append('#EXT-X-ENDLIST')
            playlistStr = "\n".join(playlist).replace('{max_duration}', str(max_duration))
            with open(m3u8, 'w', encoding='utf-8') as f:
                f.write(playlistStr)

        def run_async_in_thread(async_func, *args, **kwargs):
            """Run *async_func* to completion on a brand-new event loop.

            Intended for worker threads that have no running loop of their
            own; the loop is always closed afterwards, even when the
            coroutine raises.
            """
            fresh_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(fresh_loop)
            try:
                fresh_loop.run_until_complete(async_func(*args, **kwargs))
            finally:
                fresh_loop.close()
        
        @app.websocket("/ws/tts")
        async def wsTts(websocket: WebSocket):
            # WebSocket TTS endpoint. Each text frame is a JSON object describing
            # one synthesis request; results (cache-relative paths or base64
            # chunks) are pushed back over the same socket.
            global tasks, TTSModel, cache_dir, cache_dir2, cache_file, cacheBasePath, verbose
            # All query parameters as a plain dict
            requests = dict(websocket.query_params)
            result = dict()
            timeStamp = time.time()
            serviceType = 'TTS_WS'
            if 'taskId' not in requests:
                taskId = getTaskId()
            else:
                taskId = requests['taskId']
            tasks['tts'][taskId] = {}
            tasks['tts'][taskId]['hotword'] = ''

            multiple_speaker = False
            infer_mode = 0
            timer = await accept(websocket=websocket)
            try:
                while True:
                    if websocket.client_state == WebSocketState.DISCONNECTED:
                        break
                    data = await websocket.receive()
                    if 'text' in data:
                        text = data['text']
                        timer.cancel()
                        if (text.startswith('{') and text.endswith('}')) or (text.startswith('[') and text.endswith(']')):
                            # BUGFIX: promptData was unbound (NameError) when
                            # json.loads failed; None now falls into the
                            # "bad format" branch below.
                            promptData = None
                            try:
                                promptData = json.loads(text)
                            except Exception:
                                pass
                            if not isinstance(promptData, dict):
                                await send_message(dict(
                                    code = 400,
                                    data = {
                                        "status": "fail",
                                        "msg": "提示词格式不对！"
                                    },
                                    useTime = float(f'{time.time()-timeStamp:.3f}'),
                                    taskId = taskId
                                ), websocket)
                                continue
                            if 'text' not in promptData:
                                await send_message(dict(
                                    code = 400,
                                    data = {
                                        "status": "fail",
                                        "msg": "请输入需要合成的文本"
                                    },
                                    useTime = float(f'{time.time()-timeStamp:.3f}'),
                                    taskId = taskId
                                ), websocket)
                                # BUGFIX: previously fell through and raised
                                # KeyError on promptData['text'] after the error.
                                continue
                            # Map wire-level names onto the model parameter names.
                            # BUGFIX: these keys used to be popped unconditionally
                            # and raised KeyError when the client omitted them.
                            if 'max_sentence_bucket' in promptData:
                                promptData['sentences_bucket_max_size'] = promptData.pop('max_sentence_bucket')
                            if 'max_sentence_tokens' in promptData:
                                promptData['max_text_tokens_per_sentence'] = promptData.pop('max_sentence_tokens')
                            promptData['stream_type'] = 'stream'
                            promptData['isbase64'] = str(promptData.get('base64', ttsDefaultValue['base64'])) == '1'
                            promptData['verbose'] = True if verbose == 1 else False
                            # Fill missing keys from the defaults, then clamp.
                            for key, value in ttsDefaultValue.items():
                                if key not in promptData:
                                    promptData[key] = value
                                if key=='seed':
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], -1, np.iinfo(np.int32).max, int)
                                elif key=='top_k':
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], 0, 100, int)
                                elif key=='top_p':
                                    # BUGFIX: this branch was a duplicated 'top_k'
                                    # test, so top_p was never clamped at all.
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], 0, 1.0, float)
                                elif key=='temperature':
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], 0.1, 2.0, float)
                                elif key=='repetition_penalty':
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], 0.1, 20.0, float)
                                elif key=='sentences_bucket_max_size':
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], 1, 16, int)
                                elif key=='max_text_tokens_per_sentence':
                                    promptData[key] = getMaxMin(promptData[key], ttsDefaultValue[key], 20, 200, int)
                            text = promptData['text']
                            isbase64 = promptData['isbase64']
                            isverbose = promptData['verbose']
                            top_k = promptData['top_k']
                            top_p = promptData['top_p']
                            temperature = promptData['temperature']
                            repetition_penalty = promptData['repetition_penalty']
                            max_tokens = promptData['max_tokens']
                            sentences_bucket_max_size = promptData['sentences_bucket_max_size']
                            max_text_tokens_per_sentence = promptData['max_text_tokens_per_sentence']
                            infer_mode = promptData['infer_mode']
                            audio_prompt = promptData['audio_prompt']
                            stream_type = promptData['streamType']
                            seed = promptData['seed']
                            if infer_mode == 1:
                                infer_mode = '并行推理'
                            elif infer_mode == 2:
                                infer_mode = '流式推理'
                                stream_type = 'stream'
                            else:
                                infer_mode = '普通推理'
                            if isinstance(text, list):
                                # Multi-speaker request: resolve per-speaker prompts.
                                audio_prompt = getTTSParamsData(text)
                                multiple_speaker = len(audio_prompt) > 0
                                text = ""
                                if infer_mode=='流式推理':
                                    await send_message(dict(
                                        code = 500,
                                        data = dict(
                                            status = 'fail',
                                            msg = '多人合成模式下不支持流式推理！推薦使用並行推理'
                                        ),
                                        useTime = float(f'{time.time()-timeStamp:.3f}'),
                                        taskId = taskId
                                    ), websocket)
                                    # BUGFIX: do not continue into inference after
                                    # rejecting the request.
                                    continue
                            else:
                                # Bare speaker names map to assets/speaker/<name>.pt
                                if audio_prompt.find('.') == -1:
                                    audio_prompt = f'{audio_prompt}.pt'
                                if audio_prompt.find('/') == -1:
                                    audio_prompt = os.path.join("assets/speaker", audio_prompt)
                                if os.path.exists(audio_prompt) == False:
                                    await send_message(dict(
                                        code = 500,
                                        data = dict(
                                            status = 'fail',
                                            msg = '請上傳參考音頻'
                                        ),
                                        useTime = float(f'{time.time()-timeStamp:.3f}'),
                                        taskId = taskId
                                    ), websocket)
                                    # BUGFIX: do not run inference with a missing prompt.
                                    continue
                                audio_prompt = [audio_prompt]
                            sampling_params = dict(
                                top_p = top_p,
                                top_k = top_k,
                                temperature = temperature,
                                repetition_penalty = repetition_penalty,
                                max_tokens = max_tokens,
                            )
                            params = dict(
                                multiple_speaker=multiple_speaker,
                                audio_prompt = audio_prompt,
                                text = text,
                                seed = seed,
                                max_text_tokens_per_sentence = max_text_tokens_per_sentence,
                                sentences_bucket_max_size = sentences_bucket_max_size,
                                sampling_params=sampling_params,
                            )
                            params['infer_mode'] = infer_mode
                            fileName = toMd5(json.dumps(params, ensure_ascii=False))
                            output_path = f"{cacheBasePath}/cache/tts/{datetime.now().strftime('%Y-%m-%d')}/{fileName}"
                            taskid = toMd5(output_path)
                            if taskid not in tasks['tts_ws']:
                                if stream_type == '':
                                    result = await doTTSfunc(params, isbase64, isverbose)
                                    await send_message(dict(
                                        code = result['code'],
                                        data = result['data'],
                                        useTime = float(f'{time.time()-timeStamp:.3f}'),
                                        taskId = taskId
                                    ), websocket)
                                else:
                                    clearCache(f"{cacheBasePath}/cache", "tts")
                                    async for chunk in doTTSfunc_stream(params, fileName, output_path, isbase64, isverbose, stream_type):
                                        if isinstance(chunk, str):
                                            # Strip the SSE framing before pushing on the socket.
                                            chunk = chunk.strip()
                                            if chunk.startswith('data:'):
                                                chunk = chunk[5:].strip()
                                        await send_message(dict(
                                            code = 200,
                                            data = chunk,
                                            useTime = float(f'{time.time()-timeStamp:.3f}'),
                                            taskId = taskId
                                        ), websocket)
                                        timeStamp = time.time()
                    # BUGFIX: non-blocking throttle; time.sleep() stalled the event loop.
                    await asyncio.sleep(0.1)
            except WebSocketDisconnect:
                pass
            except Exception as ex:
                ex = str(ex)
                if ex.find('a disconnect') > -1:
                    pass
                elif ex.find('Need to call "accept" first') == -1:
                    catchError('wsIndentify')

        @app.api_route('/api/tts', methods=['POST'])
        async def tts_api(request: Request, background_tasks: BackgroundTasks):
            """HTTP TTS endpoint.

            Reads synthesis parameters from the request, clamps them to safe
            ranges and dispatches to one of three modes:
              * no stream type   -> single JSON response (path or base64 URI)
              * 'stream'         -> SSE StreamingResponse of audio chunks
              * anything else    -> HLS: .m3u8 built by a background task,
                                    the playlist path returned immediately.
            """
            global tasks, TTSModel, cache_dir, cache_dir2, cache_file, cacheBasePath, verbose, ttsDefaultValue
            clearCache(f"{cacheBasePath}/cache", "tts")
            requests = await getRequestItems(request, uploadPath='/cache/tts', upload2Base64=False)
            result = dict()
            timeStamp = time.time()
            serviceType = 'TTS_API'
            if 'taskId' not in requests:
                taskId = getTaskId()
            else:
                taskId = requests['taskId']
            multiple_speaker = False
            text = requestGet(requests, 'text', '')

            infer_mode = int(requestGet(requests, 'infer_mode', ttsDefaultValue['infer_mode']))
            audio_prompt = requestGet(requests, 'prompts', ttsDefaultValue['audio_prompt']).strip()
            seed = int(requestGet(requests, 'seed', ttsDefaultValue['seed']))
            top_k = int(requestGet(requests, 'top_k', ttsDefaultValue['top_k']))
            top_p = float(requestGet(requests, 'top_p', ttsDefaultValue['top_p']))
            temperature = float(requestGet(requests, 'temperature', ttsDefaultValue['temperature']))
            # BUGFIX: repetition_penalty is a float parameter (clamped to
            # 0.1-20.0 below); int() silently truncated values such as 1.5.
            repetition_penalty = float(requestGet(requests, 'repetition_penalty', ttsDefaultValue['repetition_penalty']))
            max_tokens = int(requestGet(requests, 'max_tokens', ttsDefaultValue['max_tokens']))
            sentences_bucket_max_size = int(requestGet(requests, 'max_sentence_bucket', ttsDefaultValue['sentences_bucket_max_size']))
            max_text_tokens_per_sentence = int(requestGet(requests, 'max_sentence_tokens', ttsDefaultValue['max_text_tokens_per_sentence']))

            # Clamp every sampling parameter to its supported range.
            seed = getMaxMin(seed, ttsDefaultValue["seed"], -1, np.iinfo(np.int32).max, int)
            top_k = getMaxMin(top_k, ttsDefaultValue["top_k"], 0, 100, int)
            top_p = getMaxMin(top_p, ttsDefaultValue["top_p"], 0, 1.0, float)
            temperature = getMaxMin(temperature, ttsDefaultValue["temperature"], 0.1, 2.0, float)
            repetition_penalty = getMaxMin(repetition_penalty, ttsDefaultValue["repetition_penalty"], 0.1, 20.0, float)
            max_tokens = getMaxMin(max_tokens, ttsDefaultValue["max_tokens"], 50, 900, int)
            sentences_bucket_max_size = getMaxMin(sentences_bucket_max_size, ttsDefaultValue["sentences_bucket_max_size"], 1, 16, int)
            max_text_tokens_per_sentence = getMaxMin(max_text_tokens_per_sentence, ttsDefaultValue["max_text_tokens_per_sentence"], 20, 200, int)

            isbase64 = int(requestGet(requests, 'base64', ttsDefaultValue['base64']))
            isverbose = int(requestGet(requests, 'verbose', verbose))
            stream_type= requestGet(requests, 'stream_type', ttsDefaultValue['streamType']) # hls,stream
            isbase64 = True if isbase64 == 1 or isbase64 == '1' else False
            isverbose = True if isverbose == 1 or isverbose == '1' else False
            if text == '':
                result = dict(
                    code = 500,
                    status = 'fail',
                    msg = '請上傳需要合成的文本信息'
                )
                return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)
            else:
                # JSON-looking text is a multi-speaker request payload.
                if (text.startswith('{') and text.endswith('}')) or (text.startswith('[') and text.endswith(']')):
                    try:
                        text = json.loads(text)
                    except:
                        result = dict(
                            code = 500,
                            status = 'fail',
                            msg = '多人合成模式的数据不是正确的JSON格式！请确认'
                        )
                        return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)

            # Streaming requires infer_mode 2; non-streaming must not use it.
            if stream_type == '':
                if infer_mode == 2:
                    infer_mode = 1
            else:
                if infer_mode != 2:
                    infer_mode = 2
            if infer_mode == 1:
                infer_mode = '并行推理'
            elif infer_mode == 2:
                infer_mode = '流式推理'
            else:
                infer_mode = '普通推理'
            if isinstance(text, list):
                # Multi-speaker mode: resolve per-speaker prompts/params.
                audio_prompt = getTTSParamsData(text)
                multiple_speaker = len(audio_prompt) > 0
                text = ""
                if infer_mode=='流式推理':
                    result = dict(
                        code = 500,
                        status = 'fail',
                        msg = '多人合成模式下不支持流式推理！推薦使用並行推理'
                    )
                    return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)
            else:
                # Bare speaker names map to assets/speaker/<name>.pt
                if audio_prompt.find('.') == -1:
                    audio_prompt = f'{audio_prompt}.pt'
                if audio_prompt.find('/') == -1:
                    audio_prompt = os.path.join("assets/speaker", audio_prompt)
                if os.path.exists(audio_prompt) == False:
                    result = dict(
                        code = 500,
                        status = 'fail',
                        msg = '請上傳參考音頻'
                    )
                    return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)
                audio_prompt = [audio_prompt]
            sampling_params = dict(
                top_p = top_p,
                top_k = top_k,
                temperature = temperature,
                repetition_penalty = repetition_penalty,
                max_tokens = max_tokens,
            )
            params = dict(
                multiple_speaker=multiple_speaker,
                audio_prompt = audio_prompt,
                text = text,
                seed = seed,
                max_text_tokens_per_sentence = max_text_tokens_per_sentence,
                sentences_bucket_max_size = sentences_bucket_max_size,
                sampling_params=sampling_params,
                infer_mode=infer_mode
            )
            if stream_type == '':
                result = await doTTSfunc(params, isbase64, isverbose)
                return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)
            else:
                fileName = toMd5(json.dumps(params, ensure_ascii=False))
                clearCache(f"{cacheBasePath}/cache", "tts")
                output_path = f"{cacheBasePath}/cache/tts/{datetime.now().strftime('%Y-%m-%d')}/{fileName}"
                if stream_type=='stream':
                    return StreamingResponse(
                        doTTSfunc_stream(params, fileName, output_path, isbase64, isverbose, stream_type),
                        media_type="text/event-stream"  # server-sent events
                    )
                else:
                    # HLS: build the playlist in the background, then hand the
                    # client the .m3u8 path once the first segment appears.
                    background_tasks.add_task(doTTSfunc_stream_make_m3u8, params, fileName, output_path, isbase64, isverbose, stream_type)
                    result = dict(
                        code=200,
                        data=f'{output_path.replace(cacheBasePath, "")}.m3u8'
                    )
                    timeout = 5
                    timeStamp2=time.time()
                    while os.path.exists(f'{output_path}_0.aac')==False and time.time()-timeStamp2 < timeout:
                        # BUGFIX: time.sleep() here blocked the event loop for
                        # up to `timeout` seconds; await yields to other requests.
                        await asyncio.sleep(0.5)
                    return response(code=result['code'], result=result['data'], useTime=float(f'{time.time()-timeStamp:.3f}'),)

        @app.api_route('/api/tts/speaker', methods=['POST', 'GET'])
        async def tts_api(request: Request):
            """Return the list of available TTS speaker names.

            Speakers are discovered via getSpeaker(); each entry looks like a
            path ending in ``<name>.pt`` — only the bare ``<name>`` is returned.
            """
            clearCache(f"{cacheBasePath}/cache", "tts")
            # getRequestItems handles any uploaded payload; its return value is
            # not needed here, but the call may have side effects — keep it.
            await getRequestItems(request, uploadPath='/cache/tts', upload2Base64=False)
            timeStamp = time.time()
            # Strip directory components and the '.pt' extension from each entry.
            speakers = [os.path.basename(d).replace('.pt', '') for d in getSpeaker()]
            result = dict(
                code=200,
                status="success",
                data=speakers
            )
            return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)

        @app.api_route('/api/cuda/gc', methods=['POST', 'GET'])
        async def guda_gc(request: Request):
            """Manually trigger garbage collection (frees CUDA cache via gc()) and report success."""
            started = time.time()
            gc()
            payload = {'code': 200, 'status': 'success'}
            return response(code=payload['code'], result=payload, useTime=float(f'{time.time()-started:.3f}'),)

        @app.api_route('/api/tasks/clear', methods=['POST', 'GET'])
        async def task_clear(request: Request):
            """Reset the in-memory task registry for type 'asr' or 'tts'.

            Any other (or missing) 'type' value is silently ignored and the
            endpoint still reports success, matching the original contract.
            """
            global tasks
            requests = await getRequestItems(request)
            timeStamp = time.time()
            # .get() avoids a KeyError when the caller omits the 'type' field.
            task_type = requests.get('type')
            if task_type in ('asr', 'tts'):
                tasks[task_type] = {}
            result = dict(
                code = 200,
                status = 'success'
            )
            return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)

        @app.api_route('/api/cache/clear', methods=['POST', 'GET'])
        async def cache_clear(request: Request):
            """Delete the on-disk cache directory for type 'asr' or 'tts'.

            NOTE: renamed from the duplicate name ``task_clear``, which shadowed
            the /api/tasks/clear handler binding above; the route path and
            behavior are unchanged.
            """
            requests = await getRequestItems(request)
            timeStamp = time.time()
            # .get() avoids a KeyError when the caller omits the 'type' field.
            cache_type = requests.get('type')
            if cache_type in ('asr', 'tts'):
                path = f'{cacheBasePath}/cache/{cache_type}'
                # Only remove the tree when it exists, preserving the original
                # no-op behavior for an already-empty cache.
                if os.path.exists(path):
                    shutil.rmtree(path)
            result = dict(
                code = 200,
                status = 'success'
            )
            return response(code=result['code'], result=result, useTime=float(f'{time.time()-timeStamp:.3f}'),)

    # 静态资源
    app.mount("/cache", StaticFiles(directory=os.path.join(cacheBasePath, 'cache')), name="cache")
    # app.mount("/output", StaticFiles(directory="output"), name="output")

    if grContainer is not None:
        app = gr.mount_gradio_app(app, grContainer, path="/")
    clear__pycache__()
    return app