import warnings
warnings.filterwarnings('ignore')
import numpy as np
import paddle
import soundfile as sf
import yaml
from paddlespeech.t2s.frontend.zh_frontend import Frontend
from paddlespeech.t2s.models.fastspeech2 import FastSpeech2
from paddlespeech.t2s.models.fastspeech2 import StyleFastSpeech2Inference
from paddlespeech.t2s.models.parallel_wavegan import PWGGenerator
from paddlespeech.t2s.models.parallel_wavegan import PWGInference
from paddlespeech.t2s.modules.normalizer import ZScore
from yacs.config import CfgNode
import threading
import time
import socket

# Cap on concurrently running inference threads; one model copy is built per slot.
NUM_THREADS = 1
# NOTE(review): name is a typo for "semaphore" — kept as-is because compute_wav references it.
semapthore = threading.Semaphore(NUM_THREADS)
# Guards the shared slot-bookkeeping list (`tids`) handed to compute_wav.
data_lock = threading.Lock()
# NOTE(review): currently unused — compute_wav writes wav files to the CWD instead (see its commented-out line).
directory = "./output/"

def open_config(fastspeech2_config, pwg_config):
    """Read both YAML config files and return them as structured CfgNode objects.

    Args:
        fastspeech2_config: path to the acoustic-model YAML config.
        pwg_config: path to the vocoder YAML config.

    Returns:
        (fastspeech2_cfg, pwg_cfg) as two CfgNode instances, in argument order.
    """
    parsed = []
    for config_path in (fastspeech2_config, pwg_config):
        with open(config_path, encoding="utf-8") as handle:
            parsed.append(CfgNode(yaml.safe_load(handle)))
    return parsed[0], parsed[1]

def initialize(phones_dict, fastspeech2_config, fastspeech2_checkpoint, fastspeech2_stat, pwg_config, pwg_checkpoint, pwg_stat):
    """Build the text frontend plus NUM_THREADS independent inference pipelines.

    One (acoustic model, vocoder) pair is created per thread slot so concurrent
    synthesis threads never share a mutable model instance.

    Args:
        phones_dict: path to the phone->id map file (one "phone id" pair per line).
        fastspeech2_config: parsed CfgNode for the acoustic model.
        fastspeech2_checkpoint: path to the FastSpeech2 checkpoint (.pdz).
        fastspeech2_stat: path to the .npy mean/std stats of the mel features.
        pwg_config: parsed CfgNode for the Parallel WaveGAN vocoder.
        pwg_checkpoint: path to the vocoder checkpoint (.pdz).
        pwg_stat: path to the .npy mean/std stats used by the vocoder normalizer.

    Returns:
        (frontend, fastspeech2_inferences, pwg_inferences) where the last two are
        lists of length NUM_THREADS.
    """
    # Chinese text frontend (text -> phone ids).
    frontend = Frontend(phone_vocab_path=phones_dict)

    # Vocabulary size comes from the phone-id map; odim from the config's mel bins.
    with open(phones_dict, encoding="utf-8") as f:
        phn_id = [line.strip().split() for line in f]
    vocab_size = len(phn_id)
    odim = fastspeech2_config.n_mels

    # Normalizers built from the dataset mean/std recorded during preprocessing.
    mu, std = np.load(fastspeech2_stat)
    fastspeech2_normalizer = ZScore(paddle.to_tensor(mu), paddle.to_tensor(std))

    mu, std = np.load(pwg_stat)
    pwg_normalizer = ZScore(paddle.to_tensor(mu), paddle.to_tensor(std))

    # Hoisted out of the loop: read each checkpoint from disk once, not once per slot.
    am_state = paddle.load(fastspeech2_checkpoint)["main_params"]
    voc_state = paddle.load(pwg_checkpoint)["generator_params"]

    # One model pair per thread slot to avoid cross-thread interference.
    fastspeech2_inferences = []
    pwg_inferences = []
    for _ in range(NUM_THREADS):
        model = FastSpeech2(
            idim=vocab_size, odim=odim, **fastspeech2_config["model"])
        model.set_state_dict(am_state)
        # Inference mode: disable batch norm updates and dropout.
        model.eval()
        fastspeech2_inference = StyleFastSpeech2Inference(fastspeech2_normalizer, model)
        fastspeech2_inference.eval()
        fastspeech2_inferences.append(fastspeech2_inference)

        vocoder = PWGGenerator(**pwg_config["generator_params"])
        vocoder.set_state_dict(voc_state)
        vocoder.remove_weight_norm()
        vocoder.eval()
        pwg_inference = PWGInference(pwg_normalizer, vocoder)
        pwg_inference.eval()
        pwg_inferences.append(pwg_inference)

    return frontend, fastspeech2_inferences, pwg_inferences

def compute_wav(index, phoneme, phone_ids, fastspeech2_inferences, pwg_inferences, results, tids):
    """Thread worker: synthesize one sentence and store its result at results[index].

    Claims a free model slot (guarded by data_lock), runs acoustic model + vocoder,
    writes the wav to the current directory, and records
    (wav_path, phoneme, duration_list, num_samples) into the shared results list.

    Bug fix: slot release and semaphore release are now in finally blocks — the
    original leaked both on any exception, permanently blocking later sentences.
    """
    semapthore.acquire()
    try:
        # Claim a free model slot under the lock so two threads never share one.
        with data_lock:
            slot = tids.index(False)
            tids[slot] = True
        try:
            fastspeech2_inference = fastspeech2_inferences[slot]
            pwg_inference = pwg_inferences[slot]

            print("Start computing for index", index, "phone size:", len(phone_ids))
            # Inference only — no gradients needed.
            with paddle.no_grad():
                mel, duration = fastspeech2_inference(phone_ids)
                wav = pwg_inference(mel)

            duration = duration.cpu().numpy().tolist()
            wav = wav.numpy()
            # Written to the CWD; the module-level `directory` is intentionally unused here.
            out_path = "out" + str(index) + ".wav"
            sf.write(out_path, wav, samplerate=24000)
            # Sample count at 24 kHz (the original misleadingly called this `time`).
            num_samples = wav.shape[0]

            results[index] = out_path, phoneme, duration, num_samples
            print("Finished computing for index", index)
        finally:
            # Always return the slot, even if inference or file IO raised.
            with data_lock:
                tids[slot] = False
    finally:
        semapthore.release()

def send_helper(result, sock: socket.socket, address):
    """Serialize one synthesis result and push it to `address` via `sock`.

    `result` is the (wav_path, phoneme, duration, time) tuple produced by
    compute_wav. Does nothing when no socket is given.
    """
    if not sock:
        return
    payload = str({
        "type": "chat",
        "wav": result[0],
        "phoneme": result[1],
        "duration": result[2],
        "time": result[3],
    })
    sock.sendto(payload.encode('utf-8'), address)

def tts(input:str, frontend, fastspeech2_inferences, pwg_inferences, sock: socket.socket=None, address=None):
    """Synthesize `input` sentence by sentence in parallel, streaming results in order.

    One worker thread is spawned per sentence; NUM_THREADS (via the module
    semaphore inside compute_wav) bounds how many run at once. Finished results
    are sent through `sock` to `address` strictly in sentence order.
    """
    frontend_out = frontend.get_input_ids(input, merge_sentences=False)
    phone_ids = frontend_out["phone_ids"]

    print("分割长度", len(phone_ids))
    # Per-sentence phoneme strings, aligned with phone_ids.
    phoneme = frontend.get_phonemes(input, merge_sentences=False)

    print("Start parallel computing")
    sentence_count = len(phone_ids)
    results = [None] * sentence_count
    tids = [False] * NUM_THREADS
    workers = []
    for sent_idx in range(sentence_count):
        worker = threading.Thread(
            target=compute_wav,
            args=(sent_idx, phoneme[sent_idx], phone_ids[sent_idx],
                  fastspeech2_inferences, pwg_inferences, results, tids))
        workers.append(worker)
        worker.start()

    print("Waiting for all threading done...")
    next_id = 0
    for worker in workers:
        worker.join()
        # At most one in-order result is flushed per join; stragglers go below.
        if results[next_id] is not None:
            print("next_id:", next_id)
            send_helper(results[next_id], sock, address)
            next_id += 1

    # Flush whatever remains after all workers have finished.
    while next_id < sentence_count:
        if results[next_id] is not None:
            print("next_id:", next_id)
            send_helper(results[next_id], sock, address)
            next_id += 1
    print("All threading done.")
                
if __name__ == "__main__":
    # Pretrained acoustic model (FastSpeech2, Baker dataset) assets.
    am_config_path = "download/fastspeech2_nosil_baker_ckpt_0.4/default.yaml"
    am_checkpoint = "download/fastspeech2_nosil_baker_ckpt_0.4/snapshot_iter_76000.pdz"
    am_stat = "download/fastspeech2_nosil_baker_ckpt_0.4/speech_stats.npy"
    # Pretrained vocoder (Parallel WaveGAN, Baker dataset) assets.
    voc_config_path = "download/pwg_baker_ckpt_0.4/pwg_default.yaml"
    voc_checkpoint = "download/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz"
    voc_stat = "download/pwg_baker_ckpt_0.4/pwg_stats.npy"
    phones_dict = "download/fastspeech2_nosil_baker_ckpt_0.4/phone_id_map.txt"

    am_config, voc_config = open_config(am_config_path, voc_config_path)
    frontend, am_inferences, voc_inferences = initialize(
        phones_dict, am_config, am_checkpoint, am_stat,
        voc_config, voc_checkpoint, voc_stat)

    text = "语言是人类进行沟通交流的表达方式，其储存着丰富的文化信息，传承着民族血脉，也支撑着文明的发展与演进。然而，一些少数民族语言、方言却正在无声无息地消失，与之密切相连的地域文化、历史文化也正面临濒危风险。“大约平均两周就会有一种语言消亡”，联合国教科文组织的这一调查数据让人触目惊心，且世界上正在使用的约6,000种语言，至少有43%面临濒危。而在中国，也有25种语言使用人口已不足千人。"
    # text = "测试不拉布拉不阿"

    started_at = time.time()
    tts(text, frontend, am_inferences, voc_inferences)
    print("TTS finished in", (time.time() - started_at))