import sys, os
import threading

# On macOS, enable PyTorch's MPS fallback so ops unsupported on MPS run on the CPU.
if sys.platform == "darwin":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

import ai_xinghuo
import torch
import argparse
# from text.symbols import symbols
import commons
import utils
from models import SynthesizerTrn
from text import cleaned_text_to_sequence, get_bert, symbols
from text.cleaner import clean_text
# For saving synthesized audio to a WAV file.
from scipy.io.wavfile import write
# For playing audio locally (currently disabled)
# import pygame
# For timing measurements (currently disabled)
# import time

# For streaming the audio back to the frontend as a byte stream (Flask API below, disabled)
# from flask import Response
# import io

# # Flask-related libraries (disabled)
# from flask import Flask
# from flask import request
# from flask_cors import CORS
# app = Flask(__name__)
# CORS(app, resources=r'/*')
#一些全局初始参数
net_g = None
speakers = "ayaka"
sdp_ratio = 0.2
noise_scale = 0.6
noise_scale_w = 0.8
length_scale = 1.0



def get_text(text, language_str, hps):
    # ./text/cleaner.py文件内的两个方法
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str)
    del word2ph

    assert bert.shape[-1] == len(phone)

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)

    return bert, phone, tone, language

def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
    global net_g
    '''
         ||   对文本进行处理
         ||
         \/  
    '''
    bert, phones, tones, lang_ids = get_text(text, "ZH", hps)

    with torch.no_grad():
        '''
            这行代码是在将一个张量（tensor）转移到指定的设备（如GPU或CPU）上，并在第0维（即最外层维度）上增加一个新的维度。
            这里的 phones 是一个张量，to(device) 是将张量转移到指定的设备上，
            unsqueeze(0) 是在第0维上增加一个新的维度。
        '''
        x_tst=phones.to(device).unsqueeze(0)
        tones=tones.to(device).unsqueeze(0)
        lang_ids=lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio
                           , noise_scale=noise_scale, noise_scale_w=noise_scale_w,
                            length_scale=length_scale)[0][0,0].data.cpu().float().numpy()
        del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
        return audio

def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale):
    #无梯度
    with torch.no_grad():
        #
        audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale,
                      noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker)
    return "Success", (hps.data.sampling_rate, audio)

'''********************************断层，上面是模型部分，下面是交互部分*******************************************************'''


# @app.route("/text")
# def txt_to_audio():
#     text = request.args["text"]
#     print(text)
#
#     text_output, audio_output = tts_fn(text, speakers, sdp_ratio, noise_scale, noise_scale_w, length_scale)
#     print(audio_output)
#
#
#     buffer = io.BytesIO()
#     write(buffer, audio_output[0], audio_output[1])
#     buffer.seek(0)
#
#     return Response(buffer.read(), mimetype='audio/wav')

hps = utils.get_hparams_from_file('./configs/config.json')
#确认在那个地方计算（GPU，CPU）
device = (
    "cuda:0"
    if torch.cuda.is_available()
    else (
        "mps"
        if sys.platform == "darwin" and torch.backends.mps.is_available()
        else "cpu"
    )
)

'''
    net_g = SynthesizerTrn(...)：创建一个SynthesizerTrn类的实例，
    传入一些参数，如符号表长度、滤波器长度、分段大小、说话人数量等。
'''
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    n_speakers=hps.data.n_speakers,
    **hps.model).to(device)
_ = net_g.eval()
'''
    加载预训练模型权重
    调用utils.load_checkpoint函数，加载预训练的模型权重到net_g实例中。
    其中，args.model是预训练模型的路径，skip_optimizer=True表示不加载优化器的权重。
'''
_ = utils.load_checkpoint('./logs/mymodels/nahida/G_21000.pth', net_g, None, skip_optimizer=True)

text = "你好呀，又是阳光明媚的一天"

text_output, audio_output = tts_fn(text , speakers , sdp_ratio , noise_scale , noise_scale_w , length_scale)

# import numpy as np
# np.set_printoptions(threshold=np.inf)
print(text_output)
# print(type(text_output), '|||', type(audio_output))
print(audio_output)
print(len(audio_output[1]))

write('D:/python_language/bert-vits-yun/myBert/output/1.wav', audio_output[0], audio_output[1])

del text_output , audio_output , text


def text_to_audio(input_text):
    input_text = ai_xinghuo.re_ai_speak(input_text)
    text_output, audio_output = tts_fn(input_text, speakers, sdp_ratio, noise_scale, noise_scale_w, length_scale)
    # print(text_output, audio_output)
    return text_output, audio_output

