from hparams import hparams
import torch

from preprcoessing import feature_world
import pyworld
import soundfile as sf
import glob
import numpy as np
import os
import librosa
from model import Generator

def wav_normlize(wav):
    """Rescale a waveform linearly into the range [-1, 1].

    Maps min(wav) -> -1 and max(wav) -> 1. A constant signal has zero
    dynamic range and would divide by zero in the original formula, so
    it is returned as all zeros instead.

    Args:
        wav: numpy array of audio samples.

    Returns:
        numpy array of the same shape, scaled to [-1, 1].
    """
    max_ = np.max(wav)
    min_ = np.min(wav)
    if max_ == min_:
        # Degenerate (constant) signal: avoid division by zero.
        return np.zeros_like(wav, dtype=np.float64)
    return wav * (2 / (max_ - min_)) - (max_ + min_) / (max_ - min_)
    
def load_static(catch_path):
    """Load cached per-speaker normalization statistics from disk.

    Expects two files inside ``catch_path``:
      * ``static_f0.npy``   -- [mean, std] of log-F0
      * ``static_mecp.npy`` -- [mean, std] of the coded spectral envelope

    Returns:
        dict with keys ``mean_log_f0``, ``std_log_f0``,
        ``coded_sps_mean`` and ``coded_sps_std`` (all float64).
    """
    f0_stats = np.load(os.path.join(catch_path, 'static_f0.npy'), allow_pickle=True)
    mcep_stats = np.load(os.path.join(catch_path, 'static_mecp.npy'), allow_pickle=True)
    return {
        'mean_log_f0': np.float64(f0_stats[0]),
        'std_log_f0': np.float64(f0_stats[1]),
        'coded_sps_mean': np.float64(mcep_stats[0]),
        'coded_sps_std': np.float64(mcep_stats[1]),
    }


def pitch_conversion(f0, static_A, static_B):
    """Convert an F0 contour from speaker A to speaker B in the log domain.

    Standardizes log-F0 with speaker A's mean/std, then rescales with
    speaker B's. ``np.ma.log`` masks unvoiced frames (f0 == 0) so they
    do not produce -inf.

    Args:
        f0: numpy array of F0 values.
        static_A: dict with source speaker's ``mean_log_f0``/``std_log_f0``.
        static_B: dict with target speaker's ``mean_log_f0``/``std_log_f0``.

    Returns:
        numpy (masked) array of converted F0 values.
    """
    log_f0 = np.ma.log(f0)
    normalized = (log_f0 - static_A["mean_log_f0"]) / static_A["std_log_f0"]
    return np.exp(normalized * static_B["std_log_f0"] + static_B["mean_log_f0"])
    
def featu_normlize(data, data_mean, data_std, de_conver=False):
    """Apply (or invert) mean/variance normalization.

    With ``de_conver=False`` (default) the data is standardized as
    ``(data - mean) / std``. With ``de_conver=True`` the inverse
    transform ``data * std + mean`` is applied.

    Args:
        data: numpy array of features.
        data_mean: mean used for (de-)normalization.
        data_std: standard deviation used for (de-)normalization.
        de_conver: if True, undo the normalization instead of applying it.

    Returns:
        numpy array of the transformed features.
    """
    if de_conver:
        return data * data_std + data_mean
    return (data - data_mean) / data_std
    
def synthesis_world(coded_sp, f0, ap, para):
    """Synthesize a waveform from WORLD vocoder features.

    Decodes the low-dimensional coded spectral envelope back to a full
    spectral envelope, then runs the WORLD synthesizer.

    Args:
        coded_sp: coded spectral envelope, shape (T, D), float64.
        f0: fundamental frequency contour, shape (T,).
        ap: aperiodicity, matching pyworld's expected shape -- assumed
            (T, fftlen//2 + 1); confirm against the extraction side.
        para: hyper-parameter object providing ``fs`` and ``frame_period``.

    Returns:
        float32 numpy array with the synthesized samples.
    """
    sample_rate = para.fs
    fft_size = pyworld.get_cheaptrick_fft_size(sample_rate)
    full_sp = pyworld.decode_spectral_envelope(coded_sp, sample_rate, fft_size)
    wav = pyworld.synthesize(f0, full_sp, ap, sample_rate,
                             frame_period=para.frame_period)
    return wav.astype(np.float32)
    


def StarVC_model(wav, model_G, para, spk_A, spk_B):
    """Convert a waveform from speaker A's voice to speaker B's.

    Pipeline: extract WORLD features -> standardize the coded spectral
    envelope with A's cached statistics -> map it through the trained
    StarGAN generator conditioned on B's one-hot label -> de-normalize
    with B's statistics -> convert F0 in the log domain -> WORLD
    synthesis.

    Args:
        wav: 1-D numpy array, source speech sampled at ``para.fs``.
        model_G: trained Generator, called as ``model_G(features, one_hot)``.
        para: hyper-parameter object (``spk_list``, ``n_spk``,
            ``path_catch_feas``, ...).
        spk_A: source speaker id (must appear in ``para.spk_list``).
        spk_B: target speaker id (must appear in ``para.spk_list``).

    Returns:
        float32 numpy array with the converted waveform.
    """
    # Map each speaker id to a one-hot conditioning vector.
    identity = torch.eye(para.n_spk)
    spk2one_hot = {spk: identity[i] for i, spk in enumerate(para.spk_list)}

    # Cached normalization statistics of source and target speakers.
    static_A = load_static(os.path.join(para.path_catch_feas, spk_A))
    static_B = load_static(os.path.join(para.path_catch_feas, spk_B))

    # WORLD feature extraction (time axis and full sp are not needed here).
    f0, _, _, ap, coded_sp = feature_world(wav, para)

    # Truncate so the frame count is a multiple of 4 -- presumably the
    # generator downsamples by 4 along time; confirm against the model.
    T = coded_sp.shape[0]
    out_T = T // 4 * 4
    coded_sp = coded_sp[:out_T]
    f0 = f0[:out_T]
    ap = ap[:out_T]

    # Standardize with the source speaker's statistics.
    normlize_coded_sp_A = featu_normlize(coded_sp,
                                         static_A['coded_sps_mean'],
                                         static_A['coded_sps_std'])

    # Add batch and channel dimensions: (1, 1, D, T).
    normlize_coded_sp_A = np.expand_dims(normlize_coded_sp_A.T, axis=[0, 1])
    normlize_coded_sp_A = torch.from_numpy(normlize_coded_sp_A).to(torch.float32)

    onehot_lab_B = spk2one_hot[spk_B].unsqueeze(0).float()

    # Run the generator in eval mode without tracking gradients.
    model_G.eval()
    with torch.no_grad():
        normlize_coded_sp_B = model_G(normlize_coded_sp_A, onehot_lab_B).numpy()

    # Drop batch/channel dims, back to (T, D) contiguous float64 as
    # required by pyworld.
    normlize_coded_sp_B = normlize_coded_sp_B[0, 0]
    normlize_coded_sp_B = np.ascontiguousarray(normlize_coded_sp_B.T)
    normlize_coded_sp_B = np.float64(normlize_coded_sp_B)

    # De-normalize with the target speaker's statistics.
    coded_sp_B = featu_normlize(normlize_coded_sp_B,
                                static_B['coded_sps_mean'],
                                static_B['coded_sps_std'],
                                de_conver=True)

    # Convert F0 to the target speaker and synthesize the waveform.
    f0_B = pitch_conversion(f0, static_A, static_B)
    return synthesis_world(coded_sp_B, f0_B, ap, para)
    
    
if __name__ == "__main__":

    # Load hyper-parameters.
    para = hparams()

    # Load the trained generator checkpoint (CPU inference).
    n_model = 115000
    model_name = os.path.join(para.path_save, str(n_model), 'model.pick')
    m_model = torch.load(model_name, map_location='cpu')

    model_G = Generator(para.n_spk)
    model_G.load_state_dict(m_model['model_G'])

    # Every speaker acts both as source and as target.
    spk_As = para.spk_list
    spk_Bs = para.spk_list

    # Iterate over source speakers A.
    for spk_A in spk_As:

        test_path_A = os.path.join(para.path_test_wavs, spk_A)
        # Pattern fixed from '*wav' to '*.wav' so only real .wav files
        # are picked up (the old pattern also matched names like 'foowav').
        test_wavs_A = glob.glob(os.path.join(test_path_A, '*.wav'))

        # Read each test utterance of speaker A.
        for i, file_wav in enumerate(test_wavs_A):
            print('conver file %s' % (file_wav))
            wav, _ = librosa.load(file_wav, sr=para.fs, mono=True)

            # Convert to every target speaker B (skip identity pairs).
            for spk_B in spk_Bs:

                if spk_A == spk_B:
                    continue

                conv_wav = StarVC_model(wav, model_G, para, spk_A, spk_B)
                conv_wav = wav_normlize(conv_wav)

                # Save source copy and converted result side by side.
                path_save = os.path.join(para.path_eval, str(n_model), spk_A + '2' + spk_B)
                os.makedirs(path_save, exist_ok=True)

                file_src = os.path.join(path_save, spk_A + '-' + str(i) + '.wav')
                file_target = os.path.join(path_save, spk_A + '2' + spk_B + '-' + str(i) + '.wav')

                sf.write(file_src, wav, para.fs)
                sf.write(file_target, conv_wav, para.fs)