import logging
import torch
import torch.nn as nn
from hparams import hparams
from model import Tacotron2
import os 
import soundfile as sf
import numpy as np
import librosa
from preprocessing import pinyin_2_phoneme
from xpinyin import Pinyin

import numpy as np
import librosa.display
import pyworld as pw
import matplotlib.pyplot as plt

def text2speech(model, para, coded_text_in, device):
    """Synthesize a waveform from an encoded phoneme sequence.

    Args:
        model: trained Tacotron2 model, already moved to `device` and in eval mode.
        para: hyper-parameter object (provides path_fea, fs, n_fft, win_length,
            hop_length, fmin, fmax).
        coded_text_in: 1-D numpy int array of phoneme indices (EOS included).
        device: torch device the model lives on.

    Returns:
        1-D numpy float waveform reconstructed via Griffin-Lim.
    """
    text_in = torch.from_numpy(coded_text_in)
    text_in = text_in.unsqueeze(0).to(device)  # add batch dimension

    # Run inference. BUGFIX: the original referenced the global `m_model`
    # instead of the `model` parameter, which made the parameter dead and
    # broke the function outside the __main__ script.
    with torch.no_grad():
        eval_outputs = model.inference(text_in)
        mel_out = eval_outputs[1]       # post-net refined mel output
        mel_out = mel_out.squeeze(0)    # drop batch dimension
        mel_out = mel_out.cpu().detach().numpy()

    print(mel_out.shape)
    # Load training-set normalization statistics (mean/std of mel features).
    file_static = os.path.join(para.path_fea, 'static.npy')
    static_mel = np.load(file_static, allow_pickle=True)

    mean_mel = np.float64(static_mel[0])
    std_mel = np.float64(static_mel[1])

    # De-normalize to recover the original mel scale.
    generated_mel = mel_out * std_mel + mean_mel

    # Convert dB back to power, then invert the mel spectrogram to a
    # waveform with the Griffin-Lim algorithm.
    inv_fbank = librosa.db_to_power(generated_mel)
    inv_wav = librosa.feature.inverse.mel_to_audio(inv_fbank,
                                                   sr=para.fs,
                                                   n_fft=para.n_fft,
                                                   win_length=para.win_length,
                                                   hop_length=para.hop_length,
                                                   fmin=para.fmin,
                                                   fmax=para.fmax)
    return inv_wav
    

def generate_text_code(words, dic_phoneme):
    """Encode a Chinese sentence (with prosody markers) into phoneme indices.

    Args:
        words: input text, possibly containing prosody markers like '#2'.
        dic_phoneme: mapping from phoneme symbol to integer index.

    Returns:
        List of integer phoneme codes, terminated with the '~' EOS code.
    """
    # Remove the '#' markers and their level digits before pinyin conversion.
    stripped = words.replace('#', '')
    stripped = ''.join(ch for ch in stripped if not ch.isdigit())
    # Convert characters to tone-numbered pinyin, space-separated.
    converter = Pinyin()
    out_pinyin = converter.get_pinyin(stripped, ' ', tone_marks='numbers')
    # The original marked text is passed along so prosody can be recovered.
    sent_phonemes = pinyin_2_phoneme(out_pinyin, words)
    print(sent_phonemes)
    codes = [dic_phoneme[symbol] for symbol in sent_phonemes.split()]
    codes.append(dic_phoneme['~'])  # append end-of-sequence marker
    return codes
    

if __name__ == "__main__":
    # Load hyper-parameters.
    para = hparams()

    # Load the trained model checkpoint.
    device = torch.device("cuda:0")
    n_model = 3
    checkpoint = torch.load(os.path.join('save2', str(n_model), 'model_final.pick'))

    m_model = Tacotron2(para)
    m_model.to(device)
    m_model.load_state_dict(checkpoint['model'])
    m_model.eval()

    path_save = os.path.join('eval', str(n_model))
    os.makedirs(path_save, exist_ok=True)

    # Input text with prosody markers (#1-#4).
    words = "欢迎#2来到#2南开大学#4"
    coded_text = generate_text_code(words, para.dic_phoneme)

    # Synthesize speech with Tacotron2 + Griffin-Lim.
    wav_out = text2speech(m_model, para, np.array(coded_text), device)
    # BUGFIX: normalize by the absolute peak — the previous max(wav_out)
    # ignored negative peaks and could clip (or blow up if max was tiny).
    wav_out = wav_out / np.max(np.abs(wav_out))
    sf.write(os.path.join(path_save, 'nankai.wav'), wav_out, para.fs)

    # Reload the generated audio for WORLD analysis/resynthesis.
    # BUGFIX: use para.fs instead of a hard-coded sr=48000 so this stage
    # stays consistent with the sample rate the file was written at.
    x, fs = librosa.load(os.path.join(path_save, 'nankai.wav'), sr=para.fs, mono=True)
    x = x.astype(np.double)  # pyworld requires float64 input

    hop_length = para.hop_length
    # Frame period in milliseconds. BUGFIX: keep it as a float — the old
    # int() truncation misaligned frames whenever hop_length*1000 was not
    # an exact multiple of fs (pyworld accepts float frame periods).
    frame_period = hop_length * 1000.0 / fs
    fftlen = pw.get_cheaptrick_fft_size(fs)  # FFT size required by CheapTrick

    # WORLD feature extraction: F0 (Harvest), spectral envelope (CheapTrick),
    # and aperiodicity (D4C).
    f0, timeaxis = pw.harvest(x, fs, frame_period=frame_period,
                              f0_floor=71.0, f0_ceil=800.0)
    sp = pw.cheaptrick(x, f0, timeaxis, fs)
    ap = pw.d4c(x, f0, timeaxis, fs)

    # Round-trip the spectral envelope through a 40-dim coded representation.
    coded_sp = pw.code_spectral_envelope(sp, fs, number_of_dimensions=40)
    decoded_sp = pw.decode_spectral_envelope(coded_sp, fs, fftlen)

    # Resynthesize from WORLD features and save.
    y = pw.synthesize(f0, decoded_sp, ap, fs, frame_period=frame_period)
    sf.write(os.path.join(path_save, 'world_nankai.wav'), y.astype(np.float32), fs)

    # Plot original (Griffin-Lim) and WORLD-resynthesized waveforms.
    plt.figure(figsize=(10, 2))
    librosa.display.waveshow(x, sr=fs, alpha=0.5)  # original waveform
    plt.figure(figsize=(10, 2))
    librosa.display.waveshow(y, sr=fs, alpha=0.5)  # resynthesized waveform

    plt.show()