import datetime
import glob
import os
import subprocess
import sys
import time
import traceback
from pathlib import Path

import torch
import soundfile
import librosa
import numpy as np
from fastapi import APIRouter
from starlette.responses import FileResponse

from models_code.so_vits_svc.inference.infer_tool import Svc
from logger import logger
from model import SoundSynthesisRequest, errorRes

# Gradio-era alias: the handlers below log through ``gr.info`` / ``gr.error``.
gr = logger
# When True, full tracebacks are printed for inference/load errors.
debug = False
router = APIRouter()

# Root directory for locally trained models
local_model_root = './trained'

# Map of human-readable CUDA device labels -> torch device strings,
# e.g. {"CUDA:0 NVIDIA GeForce RTX 3090": "cuda:0"}; empty on CPU-only hosts.
cuda = {}
if torch.cuda.is_available():
    for i in range(torch.cuda.device_count()):
        device_name = torch.cuda.get_device_properties(i).name
        cuda[f"CUDA:{i} {device_name}"] = f"cuda:{i}"


# 模型加载
# Model loading
def modelAnalysis(model_path,  # model checkpoint file (used when local loading is disabled)
                  config_path,  # model configuration file
                  cluster_model_path,  # cluster model or feature-retrieval (.pkl) file; may be None
                  device,  # inference device; "Auto" lets Svc pick CPU/GPU
                  enhance,  # enable the NSF-HiFiGAN enhancer
                  diff_model_path,  # diffusion model file; may be None
                  diff_config_path,  # diffusion model configuration file; may be None
                  only_diffusion,  # full-diffusion inference only (skip the So-VITS model); default False
                  use_spk_mix,  # dynamic voice blending (speaker mix); default False
                  local_model_enabled,  # whether to load the model from the local model directory
                  local_model_selection):  # directory of the selected local model
    """Load an Svc model (plus optional cluster / diffusion models) into the
    module-level ``model`` global and report the available speakers.

    Returns ``(None, msg)``: the first element is the result of
    ``dict.update`` on the leftover Gradio ``sid`` mapping (always ``None``),
    the second is a human-readable status message.
    Re-raises the original exception on failure after logging it.
    """
    global model
    global sid
    sid = {}
    try:
        device = cuda[device] if "CUDA" in device else device
        # Keep the ("dir", "basename") tuple shape even without a cluster
        # model, so the indexing below never needs a special case.
        cluster_filepath = os.path.split(cluster_model_path) if cluster_model_path is not None else ("", "no_cluster")
        # get model and config path
        if local_model_enabled:
            # pick the first .pth / .json found in the selected local directory
            model_path = glob.glob(os.path.join(local_model_selection, '*.pth'))[0]
            config_path = glob.glob(os.path.join(local_model_selection, '*.json'))[0]
        # else: use the paths uploaded from the web page as-is
        # a .pkl cluster file means "feature retrieval" rather than k-means clustering
        fr = ".pkl" in cluster_filepath[1]
        model = Svc(model_path,
                    config_path,
                    device=device if device != "Auto" else None,
                    cluster_model_path=cluster_model_path if cluster_model_path is not None else "",
                    nsf_hifigan_enhance=enhance,
                    diffusion_model_path=diff_model_path if diff_model_path is not None else "",
                    diffusion_config_path=diff_config_path if diff_config_path is not None else "",
                    shallow_diffusion=diff_model_path is not None,
                    only_diffusion=only_diffusion,
                    spk_mix_enable=use_spk_mix,
                    feature_retrieval=fr
                    )
        spks = list(model.spk2id.keys())
        device_name = torch.cuda.get_device_properties(model.dev).name if "cuda" in str(model.dev) else str(model.dev)
        msg = f"成功加载模型到设备{device_name}上\n"
        if cluster_model_path is None:
            msg += "未加载聚类模型或特征检索模型\n"
        elif fr:
            msg += f"特征检索模型{cluster_filepath[1]}加载成功\n"
        else:
            msg += f"聚类模型{cluster_filepath[1]}加载成功\n"
        if diff_model_path is None:
            msg += "未加载扩散模型\n"
        else:
            msg += f"扩散模型{diff_model_path}加载成功\n"
        msg += "当前模型的可用音色：\n"
        msg += "".join(i + " " for i in spks)
        gr.info(msg)
        return sid.update(choices=spks, value=spks[0]), msg
    except Exception as e:
        if debug:
            traceback.print_exc()
        # Log and re-raise the original exception. ``logger.error`` returns
        # None, so the previous ``raise gr.error(e)`` raised a TypeError
        # ("exceptions must derive from BaseException") instead of the real one.
        gr.error(e)
        raise


# 模型卸载
# Model unloading
def modelUnload():
    """Unload the currently loaded Svc model and release cached GPU memory.

    Returns ``(None, msg)`` — the first element mirrors the original
    ``sid.update(...)`` result (``dict.update`` always returns ``None``),
    the second is a human-readable status message.
    """
    global model
    global sid
    # ``model`` may never have been assigned (modelAnalysis not called yet);
    # reading the bare global would raise NameError in that case.
    if globals().get("model") is None:
        model = None
        if isinstance(globals().get("sid"), dict):
            sid.update(choices=[], value="")
        return None, "没有模型需要卸载!"
    model.unload_model()
    model = None
    # safe no-op when CUDA was never initialized
    torch.cuda.empty_cache()
    if isinstance(globals().get("sid"), dict):
        sid.update(choices=[], value="")
    return None, "模型卸载完毕!"


# 进行推理得到音频文件
# Run inference and write the resulting audio file
def vc_infer(output_format, sid, audio_path, truncated_basename, vc_transform, auto_f0, cluster_ratio, slice_db,
             noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold,
             k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment):
    """Run sliced inference with the globally loaded model and save the result.

    Returns the path of the file written under ``results/``; the file name
    encodes the speaker, key shift, cluster ratio and inference mode.
    """
    global model
    _audio = model.slice_inference(
        audio_path,
        sid,
        vc_transform,
        slice_db,
        cluster_ratio,
        auto_f0,
        noise_scale,
        pad_seconds,
        cl_num,
        lg_num,
        lgr_num,
        f0_predictor,
        enhancer_adaptive_key,
        cr_threshold,
        k_step,
        use_spk_mix,
        second_encoding,
        loudness_envelope_adjustment
    )
    model.clear_empty()
    # Build the save path inside the results folder. exist_ok avoids the
    # check-then-create race; a dead "str(int(time.time()))" no-op was removed.
    os.makedirs("results", exist_ok=True)
    key = "auto" if auto_f0 else f"{int(vc_transform)}key"
    cluster = "_" if cluster_ratio == 0 else f"_{cluster_ratio}_"
    # tag the file with the inference path that produced it
    # (only_diffusion takes precedence, matching the original check order)
    if model.only_diffusion:
        isdiffusion = "diff"
    elif model.shallow_diffusion:
        isdiffusion = "sovdiff"
    else:
        isdiffusion = "sovits"
    output_file_name = 'result_' + truncated_basename + f'_{sid}_{key}{cluster}{isdiffusion}.{output_format}'
    output_file = os.path.join("results", output_file_name)
    soundfile.write(output_file, _audio, model.target_sample, format=output_format)
    return output_file


# 音频转音频
# Audio-to-audio voice conversion
def vc_fn(sid,
          input_audio,
          output_format,
          vc_transform,
          auto_f0,
          cluster_ratio,
          slice_db,
          noise_scale,
          pad_seconds,
          cl_num,
          lg_num,
          lgr_num,
          f0_predictor,
          enhancer_adaptive_key,
          cr_threshold,
          k_step,
          use_spk_mix,
          second_encoding,
          loudness_envelope_adjustment):
    """Convert an uploaded audio file to the selected speaker's voice.

    Returns ``(status_message, output_file_path_or_None)``.
    Re-raises the original exception on failure after logging it.
    """
    global model
    try:
        if input_audio is None:
            return "You need to upload an audio", None
        if model is None:
            return "You need to upload an model", None
        # a cluster ratio only makes sense once a cluster / feature-retrieval
        # model has actually been loaded
        if getattr(model, 'cluster_model', None) is None and model.feature_retrieval is False:
            if cluster_ratio != 0:
                return "You need to upload an cluster model or feature retrieval model before assigning cluster ratio!", None
        audio, sampling_rate = soundfile.read(input_audio)
        # normalise integer PCM samples to float32 in [-1, 1]
        if np.issubdtype(audio.dtype, np.integer):
            audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
        # down-mix multi-channel audio to mono
        if len(audio.shape) > 1:
            audio = librosa.to_mono(audio.transpose(1, 0))
        # Gradio-uploaded file paths carry a fixed 6-character suffix for an
        # unknown reason; strip it here
        truncated_basename = Path(input_audio).stem[:-6]
        # make sure the intermediate "raw" folder exists before writing into it
        os.makedirs("raw", exist_ok=True)
        processed_audio = os.path.join("raw", f"{truncated_basename}.wav")
        soundfile.write(processed_audio, audio, sampling_rate, format="wav")
        output_file = vc_infer(output_format, sid, processed_audio, truncated_basename, vc_transform, auto_f0,
                               cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor,
                               enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding,
                               loudness_envelope_adjustment)

        return "Success", output_file
    except Exception as e:
        if debug:
            traceback.print_exc()
        # Log and re-raise. ``logger.error`` returns None, so the previous
        # ``raise gr.error(e)`` raised a TypeError instead of the real error.
        gr.error(e)
        raise


# 文字转音频
# Text-to-audio voice conversion
def vc_fn2(_text,  # text to synthesize; enabling F0 prediction is recommended
           _lang,  # language; "Auto" detects it from the input text
           _gender,  # speaker gender: "男" (male) or "女" (female)
           _rate,  # TTS speed offset, relative (-1 .. 3, step 0.1)
           _volume,  # TTS volume offset, relative (-1 .. 1.5, step 0.1)
           sid,  # target speaker / timbre
           output_format,  # output format: "wav", "flac" or "mp3"
           vc_transform,  # pitch shift in semitones (+12 = one octave up)
           auto_f0,  # automatic F0 prediction (speech only; disables pitch shift)
           cluster_ratio,  # cluster / feature-retrieval mix ratio, 0-1 (0 disables)
           slice_db,  # slicing threshold, dB (default -40)
           noise_scale,  # noise scale; affects quality, best left at 0.4
           pad_seconds,  # seconds of silence padding to avoid edge artifacts
           cl_num,  # auto-slice length in seconds (0 = no slicing)
           lg_num,  # crossfade length between slices, seconds
           lgr_num,  # proportion of the crossfade kept per slice, (0, 1]
           f0_predictor,  # F0 predictor: pm / dio / harvest / crepe / rmvpe
           enhancer_adaptive_key,  # adapt the enhancer to higher pitch, semitones
           cr_threshold,  # F0 filter threshold (crepe only), 0-1
           k_step,  # shallow-diffusion steps (diffusion model only), 1-1000
           use_spk_mix,  # dynamic voice blending
           second_encoding,  # re-encode the source before shallow diffusion
           loudness_envelope_adjustment  # 0-1 mix toward the output loudness envelope
           , gr):  # logger-like object exposing ``.error()``
    """Synthesize ``_text`` with edge-tts, then convert it to the target voice.

    Returns ``(status_message, output_file_path_or_None)``.
    Re-raises the original exception on failure after logging it.
    """
    global model
    try:
        if model is None:
            return "You need to upload an model", None
        if getattr(model, 'cluster_model', None) is None and model.feature_retrieval is False:
            if cluster_ratio != 0:
                return "You need to upload an cluster model or feature retrieval model before assigning cluster ratio!", None
        # edge-tts expects signed percentage strings such as "+10%" / "-5%"
        _rate = f"+{int(_rate * 100)}%" if _rate >= 0 else f"{int(_rate * 100)}%"
        _volume = f"+{int(_volume * 100)}%" if _volume >= 0 else f"{int(_volume * 100)}%"
        # Use the same relative script path as ffm_get. The previous code used
        # hard-coded absolute Windows paths with inconsistent escaping ("\A")
        # and a doubled "edgetts\\edgetts" segment in the non-Auto branch.
        tts_script = "models_code/so_vits_svc/edgetts/tts.py"
        if _lang == "Auto":
            # tts.py expects English gender labels when auto-detecting the language
            _gender = "Male" if _gender == "男" else "Female"
            subprocess.run([sys.executable, tts_script, _text, _lang, _rate, _volume, "tts.wav", _gender])
        else:
            subprocess.run([sys.executable, tts_script, _text, _lang, _rate, _volume, "tts.wav"])
        # resample the TTS output to the 44.1 kHz the model expects
        target_sr = 44100
        y, sr = librosa.load("tts.wav")
        resampled_y = librosa.resample(y, orig_sr=sr, target_sr=target_sr)
        soundfile.write("tts.wav", resampled_y, target_sr, subtype="PCM_16")
        input_audio = "tts.wav"
        output_file_path = vc_infer(output_format, sid, input_audio, "tts", vc_transform, auto_f0, cluster_ratio,
                                    slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor,
                                    enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding,
                                    loudness_envelope_adjustment)
        return "Success", output_file_path
    except Exception as e:
        if debug:
            traceback.print_exc()
        # Log and re-raise. ``.error()`` returns None, so the previous
        # ``raise gr.error(e)`` raised a TypeError instead of the real error.
        gr.error(e)
        raise


def ffm_get(_text, _lang, _gender, _rate, _volume):
    """Synthesize speech via the edge-tts helper script.

    Builds a timestamped output path under ``out/``, runs tts.py in a
    subprocess (adding the gender argument only in auto-language mode, which
    is what the script expects), and returns the output wav path. The file
    may not exist if the subprocess failed — callers check with ``is_file``.
    """
    # create the output folder (and missing parents) without a TOCTOU race
    os.makedirs("out", exist_ok=True)
    voice_file_path = f"out/tts-{datetime.datetime.now().timestamp()}.wav"
    cmd = [sys.executable,
           "models_code/so_vits_svc/edgetts/tts.py", _text,
           _lang, _rate, _volume, voice_file_path]
    if _lang == "Auto":
        # tts.py expects English gender labels when auto-detecting the language
        cmd.append("Male" if _gender == "男" else "Female")
    subprocess.run(cmd)
    return voice_file_path


@router.post("/text/toVoice",
             summary="文字转语音大模型",
             description="将文字识别为语音,模型选择为: ffm")
async def text_to_voice(soundSynthesisRequest: SoundSynthesisRequest):
    """Convert request text to speech with edge-tts and stream the wav back.

    Returns a FileResponse with the synthesized audio, or an errorRes payload
    if the TTS subprocess produced no file.
    """
    # edge-tts expects signed percentage strings such as "+10%" / "-5%"
    _rate = f"+{int(soundSynthesisRequest.rate * 100)}%" if soundSynthesisRequest.rate >= 0 else f"{int(soundSynthesisRequest.rate * 100)}%"
    _volume = f"+{int(soundSynthesisRequest.volume * 100)}%" if soundSynthesisRequest.volume >= 0 else f"{int(soundSynthesisRequest.volume * 100)}%"
    # NOTE: ffm_get translates "男"/"女" to "Male"/"Female" itself in Auto
    # mode, so the raw gender is passed through (a dead local ``_gender``
    # that was computed but never used has been removed).
    voice_file_path = ffm_get(soundSynthesisRequest.text, "Auto", soundSynthesisRequest.gender, _rate, _volume)
    file = Path(voice_file_path)
    if not file.is_file():
        return errorRes("File not found")
    return FileResponse(voice_file_path,
                        media_type="application/octet-stream", filename=file.name)


# def test():
#     test_text = "大家好，我叫谢振瑜，很高兴认识大家。"
#     test_lang = "Auto"
#     test_gender = "男"
#     test_rate = 0
#     test_volume = 0
#     test_sid = "ferocious"
#     test_output_format = "wav"
#     test_vc_transform = 0
#     test_auto_f0 = True
#     test_cluster_ratio = 0
#     test_slice_db = -40
#     test_noise_scale = 0.4
#     test_pad_seconds = 0.4
#     test_cl_num = 0
#     test_lg_num = 0
#     test_lgr_num = 0.75
#     test_f0_predictor = "dio"
#     test_enhancer_adaptive_key = 0
#     test_cr_threshold = 0.05
#     test_k_step = 100
#     test_use_spk_mix = False
#     test_second_encoding = False
#     test_loudness_envelope_adjustment = 0.5
#     test_output_file_path = vc_fn2(test_text,  # 在此输入要转译的文字。注意，使用该功能建议打开F0预测，不然会很怪
#                                    test_lang,  # 选择语言，Auto为根据输入文字自动识别 choices=SUPPORTED_LANGUAGES, value = "Auto"
#                                    test_gender,  # 说话人性别 choices = ["男","女"], value = "男"
#                                    test_rate,  # TTS语音变速（倍速相对值）minimum = -1, maximum = 3, value = 0, step = 0.1
#                                    test_volume,  # TTS语音音量（相对值）minimum = -1, maximum = 1.5, value = 0, step = 0.1
#                                    test_sid,  # 音色（说话人）
#                                    test_output_format,  # 音频输出格式 choices=["wav", "flac", "mp3"], value = "wav"
#                                    test_vc_transform,  # 变调（整数，可以正负，半音数量，升高八度就是12）value=0
#                                    test_auto_f0,  # 自动f0预测，配合聚类模型f0预测效果更好,会导致变调功能失效（仅限转换语音，歌声勾选此项会究极跑调 value=False
#                                    test_cluster_ratio,  # 聚类模型/特征检索混合比例，0-1之间，0即不启用聚类/特征检索。使用聚类/特征检索能提升音色相似度，但会导致咬字下降（如果使用建议0.5左右） value=0
#                                    test_slice_db,  # 切片阈值 value=-40
#                                    test_noise_scale,  # noise_scale 建议不要动，会影响音质，玄学参数 value=0.4
#                                    test_pad_seconds,  # 推理音频pad秒数，由于未知原因开头结尾会有异响，pad一小段静音段后就不会出现 value=0.5
#                                    test_cl_num,  # 音频自动切片，0为不切片，单位为秒(s)" value=0
#                                    test_lg_num,  # 两端音频切片的交叉淡入长度，如果自动切片后出现人声不连贯可调整该数值，如果连贯建议采用默认值0，注意，该设置会影响推理速度，单位为秒/s value=0
#                                    test_lgr_num,  # 自动音频切片后，需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例，范围0-1,左开右闭 value=0.75
#                                    test_f0_predictor,  # 选择F0预测器,可选择crepe,pm,dio,harvest,rmvpe,默认为pm(注意：crepe为原F0使用均值滤波器) choices=["pm","dio","harvest","crepe","rmvpe"], value="pm"
#                                    test_enhancer_adaptive_key,  # 使增强器适应更高的音域(单位为半音数)|默认为0 value=0
#                                    test_cr_threshold,  # F0过滤阈值，只有启动crepe时有效. 数值范围从0-1. 降低该值可减少跑调概率，但会增加哑音 value=0.05
#                                    test_k_step, # 浅扩散步数，只有使用了扩散模型才有效，步数越大越接近扩散模型的结果 value=100, minimum = 1, maximum = 1000
#                                    test_use_spk_mix,  # 动态声线融合 value=False
#                                    test_second_encoding,  # 二次编码，浅扩散前会对原始音频进行二次编码，玄学选项，效果时好时差，默认关闭 value=False
#                                    test_loudness_envelope_adjustment  # 输入源响度包络替换输出响度包络融合比例，越靠近1越使用输出响度包络 value=0.5
#                                    )
#     print(test_output_file_path)
#
# if __name__ == '__main__':
#     model_path_local = "C:\\Users\\Administrator\\PycharmProjects\\ailpha-ai\\bigmodel\\so-vits-svc\\bark.pth"
#     config_path_local = "C:\\Users\\Administrator\\PycharmProjects\\ailpha-ai\\bigmodel\\so-vits-svc\\bark.json"
#     cluster_model_path_local = "C:\\Users\\Administrator\\PycharmProjects\\ailpha-ai\\bigmodel\\so-vits-svc\\feature_and_index.pkl"
#     device_default = "cpu"
#     enhance_default = 0
#     diff_model_path_local = "C:\\Users\\Administrator\\PycharmProjects\\ailpha-ai\\bigmodel\\so-vits-svc\\diffusion.pt"
#     diff_config_path_local = "C:\\Users\\Administrator\\PycharmProjects\\ailpha-ai\\bigmodel\\so-vits-svc\\diffusion.yaml"
#     only_diffusion_default = False
#     use_spk_mix_default = False
#     # 加载模型
#     modelAnalysis(model_path_local,
#                   config_path_local,
#                   cluster_model_path_local,
#                   device_default,
#                   enhance_default,
#                   diff_model_path_local,
#                   diff_config_path_local,
#                   only_diffusion_default,
#                   use_spk_mix_default,
#                   None,
#                   None)
#     test()




