import torch
import numpy as np

import soundfile as sf
import json
import librosa

import threading

# from model.model_encoder import Encoder, Encoder_lf0
from model.model_encoder_Hubert_SpeakerEmbedding_classifier2 import Encoder, Encoder_lf0
from model.model_decoder import Decoder_ac
# from model.model_encoder import SpeakerEncoder as Encoder_spk
# from model.model_encoder_randn_attention_speaker_no_batch import SpeakerEncoder as Encoder_spk
# from model.model_encoder_randn_attention_speaker import SpeakerEncoder as Encoder_spk
from model.model_encoder_Hubert_SpeakerEmbedding_classifier import SpeakerEncoder as Encoder_spk
# from model.model_encoder_k_attention_speaker_30_AIshell import SpeakerEncoder as Encoder_spk
import model.ResNetSE34V2 as ResNetSE34V2

import os

import subprocess
from utils.spectrogram import logmelspectrogram
# from utils.spectrogram import *
import kaldiio
# from fairseq.models.wav2vec import Wav2Vec2Model
import resampy
import pyworld as pw
import fairseq
import argparse

# Load every sub-model needed for one conversion worker.
def load_model(index, model_path):
    """Load the HuBERT extractor, speaker embedder and VC networks.

    Args:
        index: worker index; even workers use cuda:0, odd workers cuda:1,
            so concurrent threads are spread across two GPUs.
        model_path: checkpoint path holding "encoder", "encoder_spk" and
            "decoder" state dicts.

    Returns:
        Tuple (model, embedder, encoder, encoder_spk, encoder_lf0,
        decoder, device) with all networks in eval mode on `device`.
    """
    # Alternate GPUs by worker parity; fall back to CPU without CUDA.
    device = torch.device(f"cuda:{index % 2}" if torch.cuda.is_available() else "cpu")

    # HuBERT content-feature extractor (fairseq checkpoint).
    ckpt_path = "/home/wang/codes/py/VC/model_checkpoints/hubert_large_ll60k.pt"
    models, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
    model = models[0]
    model.eval()
    model = model.to(device)

    # Speaker embedder (ResNetSE34V2). The pretrained checkpoint prefixes
    # speaker-network weights with '__S__.' and loss-head weights with
    # '__L__.'; strip the former and drop the latter before loading.
    pt_path = "/home/wang/codes/py/VC/Model_ResNetSE34/baseline_v2_ap.model"
    embedder_pt = torch.load(pt_path, map_location="cpu")
    embedder = ResNetSE34V2.ResNetSE()
    for key in list(embedder_pt.keys()):
        if str(key).startswith('__S__'):
            embedder_pt[key.replace("__S__.", "")] = embedder_pt.pop(key)
        elif str(key).startswith('__L__'):
            embedder_pt.pop(key)
    embedder.load_state_dict(embedder_pt)
    embedder.eval()
    # BUGFIX: keep the embedder on the worker's device like every other
    # network (it previously stayed on CPU until a hard-coded .cuda() later).
    embedder.to(device)

    # VC networks: content encoder, lf0 encoder, speaker encoder, decoder.
    encoder = Encoder(in_channels=80, channels=512, n_embeddings=512, z_dim=64, c_dim=256)
    encoder_lf0 = Encoder_lf0()
    encoder_spk = Encoder_spk()
    decoder = Decoder_ac(dim_neck=64)
    encoder.to(device)
    encoder_lf0.to(device)
    encoder_spk.to(device)
    decoder.to(device)

    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    encoder.load_state_dict(checkpoint["encoder"])
    encoder_spk.load_state_dict(checkpoint["encoder_spk"])
    decoder.load_state_dict(checkpoint["decoder"])

    encoder.eval()
    encoder_spk.eval()
    decoder.eval()

    return model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device

# Feature extraction for one utterance.
def extract_logmel_embedding(wav_path, mean, std, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device):
    """Extract log-mel, normalized log-F0 and (for "Hubert" mode) HuBERT
    content features plus a speaker embedding from one wav file.

    Args:
        wav_path: path of the input wav (resampled to 16 kHz if needed).
        mean, std: corpus-level mel statistics used to normalize the mel.
        mood: "Hubert" to also return HuBERT features and a speaker
            embedding; anything else returns only (mel, lf0).
        model: HuBERT model (used only when mood == "Hubert").
        embedder: speaker-embedding network (used only when mood == "Hubert").
        encoder, encoder_spk, encoder_lf0, decoder: unused here; kept for
            signature compatibility with callers that thread them through.
        device: torch device all tensors are placed on.

    Returns:
        (mel, lf0) or, for "Hubert" mode, (mel, lf0, feat, embedding).
    """
    sr = 16000
    wav, fs = sf.read(wav_path)
    if fs != sr:
        wav = resampy.resample(wav, fs, sr, axis=0)
        fs = sr
    assert fs == 16000
    # Rescale only if the signal clips beyond [-1, 1].
    peak = np.abs(wav).max()
    if peak > 1.0:
        wav /= peak
    mel = logmelspectrogram(
                x=wav,
                fs=fs,
                n_mels=80,
                n_fft=400,
                n_shift=160,
                win_length=400,
                window='hann',
                fmin=80,
                fmax=7600,
            )
    if mood == "Hubert":
        with torch.no_grad():
            # HuBERT content features for the whole utterance.
            wav_tensor = torch.tensor(np.array([wav]), dtype=torch.float).to(device)
            feat = model.extract_features(wav_tensor)[0]
            feat = feat.detach().cpu().squeeze(0)
            # Per-utterance standardization of the HuBERT features.
            f_mean = torch.mean(feat, axis=0).reshape(1, -1)
            f_std = torch.std(feat, axis=0).reshape(1, -1)
            feat = (feat - f_mean) / (f_std + 1e-8)

    # Normalize the mel with the precomputed corpus statistics.
    mel = (mel - mean) / (std + 1e-8)

    # F0 via WORLD (dio + stonemask), trimmed to the mel length.
    tlen = mel.shape[0]
    frame_period = 160 / fs * 1000
    f0, timeaxis = pw.dio(wav.astype('float64'), fs, frame_period=frame_period)
    f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, fs)
    f0 = f0[:tlen].reshape(-1).astype('float32')
    nonzeros_indices = np.nonzero(f0)
    lf0 = f0.copy()
    lf0[nonzeros_indices] = np.log(f0[nonzeros_indices])  # lf0 > 0 where voiced
    # Standardize voiced frames only; guard against fully-unvoiced audio,
    # where mean/std over zero frames would be NaN. (Renamed from mean/std
    # to avoid shadowing the mel-statistics parameters.)
    if nonzeros_indices[0].size > 0:
        lf0_mean = np.mean(lf0[nonzeros_indices])
        lf0_std = np.std(lf0[nonzeros_indices])
        lf0[nonzeros_indices] = (lf0[nonzeros_indices] - lf0_mean) / (lf0_std + 1e-8)

    if mood == "Hubert":
        # BUGFIX: use the worker's device instead of .cuda(), which always
        # targeted cuda:0 and defeated the two-GPU threading scheme.
        speaker_model = embedder.to(device)
        wav32 = torch.tensor([np.array(wav, dtype=np.float32)]).to(device)
        embedding = speaker_model(wav32)[0].detach().cpu().numpy()
        return mel, lf0, feat, embedding

    return mel, lf0

# Convert a single source utterance toward a reference speaker.
def convert(source_wav, reference_wav, converted_wav_path, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device):
    """Run VC on one (source, reference) pair and vocode the result.

    Writes the converted mel features as a kaldi ark/scp pair under
    `converted_wav_path`, then invokes parallel-wavegan-decode to
    synthesize the waveform into the same directory.

    Args:
        source_wav: path of the utterance whose content is kept.
        reference_wav: path of the utterance whose speaker is copied.
        converted_wav_path: output directory (created if missing).
        mood: "VQMIVC" (mel-based content/speaker) or "Hubert"
            (HuBERT content + speaker embedding). Unknown moods write
            nothing but still run the (empty) vocoder step, matching the
            original behavior.
        model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device:
            as returned by load_model().
    """
    src_wav_path = source_wav
    ref_wav_path = reference_wav
    out_dir = converted_wav_path
    os.makedirs(out_dir, exist_ok=True)

    # Corpus-level mel statistics used for normalization.
    mel_stats = np.load('/home/wang/codes/py/VC/VCTK/Hubert/Hubert_mel_stats.npy')
    mean = mel_stats[0]
    std = mel_stats[1]

    feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=str(out_dir) + '/feats.1'))

    src_filename = os.path.basename(src_wav_path).split('.')[0]
    ref_filename = os.path.basename(ref_wav_path).split('.')[0]
    out_key = src_filename + '_to_' + ref_filename + '_converted'

    # The two moods differ only in where the content code z and the
    # speaker embedding come from; the decoding path is shared.
    # (The original duplicated the whole pipeline per mood and contained
    # an unreachable branch referencing an undefined src_Hubert.)
    with torch.no_grad():
        if mood == "VQMIVC":
            src_mel, src_lf0 = extract_logmel_embedding(src_wav_path, mean, std, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device)
            ref_mel, _ = extract_logmel_embedding(ref_wav_path, mean, std, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device)
            src_mel = torch.FloatTensor(src_mel.T).unsqueeze(0).to(device)
            src_lf0 = torch.FloatTensor(src_lf0).unsqueeze(0).to(device)
            ref_mel = torch.FloatTensor(ref_mel.T).unsqueeze(0).to(device)
            spk_emb = encoder_spk(ref_mel)
            z, _, _, _ = encoder.encode(src_mel)
        elif mood == "Hubert":
            src_mel, src_lf0, src_Hubert, src_embedding = extract_logmel_embedding(src_wav_path, mean, std, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device)
            ref_mel, _, ref_Hubert, ref_embedding = extract_logmel_embedding(ref_wav_path, mean, std, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device)
            src_lf0 = torch.FloatTensor(src_lf0).unsqueeze(0).to(device)
            src_Hubert = torch.FloatTensor(src_Hubert.T).unsqueeze(0).to(device)
            ref_embedding = torch.FloatTensor(ref_embedding).unsqueeze(0).to(device)
            spk_emb = encoder_spk(ref_embedding)
            z = encoder(src_Hubert)

        if mood in ("VQMIVC", "Hubert"):
            lf0_embs = encoder_lf0(src_lf0)
            output = decoder(z, lf0_embs, spk_emb)
            feat_writer[out_key] = output.squeeze(0).cpu().numpy()

    feat_writer.close()

    print('synthesize waveform...')
    cmd = ['parallel-wavegan-decode', '--checkpoint', \
           '/home/wang/codes/py/VC/vocoder/vocoder/checkpoint-3000000steps.pkl', \
           '--scp', f"{str(out_dir)}/feats.1.scp", '--outdir', str(out_dir)]
    subprocess.call(cmd)

# Thread worker: convert every (source, reference) pair of one list.
def prepareFiles(file, root, converted_wav_root_path, path_name, index, mood, model, embedder, encoder,encoder_spk, encoder_lf0, decoder, device):
    """Run convert() for each wav pair in `file`, writing all outputs to
    the gender-pair subfolder selected by `index`."""
    # The output folder is the same for every pair in this list.
    out_dir = converted_wav_root_path + path_name[index]
    for pair in file:
        src_path = os.path.join(root, pair[0])
        ref_path = os.path.join(root, pair[1])
        convert(src_path, ref_path, out_dir, mood, model, embedder,
                encoder, encoder_spk, encoder_lf0, decoder, device)
        
def convertedAllThread(converted_wav_root_path, model_path):
    """Convert all evaluation pairs using one thread per gender-pair list.

    Each thread loads its own copy of the models; load_model() alternates
    GPUs by worker index.

    Args:
        converted_wav_root_path: root output directory; per-list subfolders
            ("F_F", "F_M", "M_F", "M_M") are appended by the workers.
        model_path: VC checkpoint passed through to load_model().
    """
    # The four pair lists, one per gender combination, read from JSON.
    # (The original copy-pasted four open() blocks and used 'r+' although
    # the files are only read.)
    json_dir = '/home/wang/codes/py/VC/Evaluate_wavs'
    json_names = [
        'json_testFemaleToFemale.txt',
        'json_testFemaleToMale.txt',
        'json_testMaleToFemale.txt',
        'json_testMaleToMale.txt',
    ]
    All_files = []
    for name in json_names:
        with open(os.path.join(json_dir, name), 'r') as f:
            All_files.append(json.load(f))

    path_name = ["F_F", "F_M", "M_F", "M_M"]
    root = "/home/wang/codes/py/VC/Evaluate_wavs"
    mood = "Hubert"

    threads = []
    for index, file in enumerate(All_files):
        # One private model set per thread; index parity picks the GPU.
        model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device = load_model(index, model_path)
        thread = threading.Thread(target=prepareFiles, args=(file, root, converted_wav_root_path, path_name, index, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    print("All files processed.")

def convertedAll(converted_wav_root_path, model_path):
    """Convert all evaluation pairs sequentially with a single model set.

    Args:
        converted_wav_root_path: root output directory; per-list subfolders
            ("F_F", "F_M", "M_F", "M_M") are appended.
        model_path: VC checkpoint passed through to load_model().
    """
    # BUGFIX: the original passed converted_wav_root_path (a string) as the
    # worker index, which crashes on `index % 2` inside load_model().
    model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device = load_model(0, model_path)

    # The four pair lists, one per gender combination, read from JSON.
    json_dir = '/home/wang/codes/py/VC/Evaluate_wavs'
    json_names = [
        'json_testFemaleToFemale.txt',
        'json_testFemaleToMale.txt',
        'json_testMaleToFemale.txt',
        'json_testMaleToMale.txt',
    ]
    All_files = []
    for name in json_names:
        with open(os.path.join(json_dir, name), 'r') as f:
            All_files.append(json.load(f))

    path_name = ["F_F", "F_M", "M_F", "M_M"]
    root = "/home/wang/codes/py/VC/Evaluate_wavs"
    # BUGFIX: `mood` was never defined in this function, so the convert()
    # call below raised NameError; the other entry points use "Hubert".
    mood = "Hubert"

    for index, file in enumerate(All_files):
        for pair in file:
            source_wav = os.path.join(root, pair[0])
            reference_wav = os.path.join(root, pair[1])
            converted_wav_path = converted_wav_root_path + path_name[index]
            convert(source_wav, reference_wav, converted_wav_path, mood, model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device)

def convertedTest10(converted_wav_root_path, model_path):
    """Convert a fixed demo set of wav pairs into `converted_wav_root_path`.

    Loads the models once (worker index 0) and runs convert() on each
    hard-coded (source, reference) pair from the demo wav folder.
    """
    model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device = load_model(0, model_path)
    # Demo pairs: DNS/LibriVox voices first, then VCTK seen/unseen voices.
    demo_pairs = [
          ["F1_book_05448_chp_0038_reader_06708_1.wav","M1_book_11346_chp_0030_reader_00812_9.wav"],
          ["F2_clean_fileid_9371.wav","M2_clean_fileid_11248.wav"],
          ["M1_book_11346_chp_0030_reader_00812_9.wav","F1_book_05448_chp_0038_reader_06708_1.wav"],
          ["M2_clean_fileid_11248.wav","F2_clean_fileid_9371.wav"],
          ["F1_book_05448_chp_0038_reader_06708_1.wav","F2_clean_fileid_9371.wav","M2_clean_fileid_11248.wav"],
          ["M1_book_11346_chp_0030_reader_00812_9.wav","M2_clean_fileid_11248.wav"],


          ["F1_VCTK_unseen_p228_001_mic1.wav","M1_VCTK_p376_006_mic1.wav"],
          ["F2_VCTK_unseen_p253_008_mic1.wav","M2_VCTK_unseen_p263_007_mic1.wav"],
          ["M1_VCTK_p376_006_mic1.wav","F1_VCTK_unseen_p228_001_mic1.wav"],
          ["M2_VCTK_unseen_p263_007_mic1.wav","F2_VCTK_unseen_p253_008_mic1.wav"],
          ["F1_VCTK_unseen_p228_001_mic1.wav","F2_VCTK_unseen_p253_008_mic1.wav"],
          ["M1_VCTK_p376_006_mic1.wav","M2_VCTK_unseen_p263_007_mic1.wav"]
          ]
    root = "/home/wang/codes/py/VC/test_wav/all_wav"
    mood = "Hubert"

    for pair in demo_pairs:
        src_path = os.path.join(root, pair[0])
        ref_path = os.path.join(root, pair[1])
        convert(src_path, ref_path, converted_wav_root_path, mood,
                model, embedder, encoder, encoder_spk, encoder_lf0, decoder, device)

if __name__ == "__main__":
    
    # Output root for converted audio; alternatives kept for other checkpoints.
    # converted_wav_root_path='/home/wang/codes/py/VC/converted/testDemoHubert_SpeakerEmbedding_cycle_three_stage_checkpoints/'
    # converted_wav_root_path='/home/wang/codes/py/VC/converted/Best_Hubert_Xvector_Classifier/'
    # converted_wav_root_path='/home/wang/codes/py/VC/converted/testDemo_Hubert_SpeakerEmbedding_cycle_three_stage_checkpoints/'
    converted_wav_root_path='/home/wang/codes/py/VC/converted/Hubert_SpeakerEmbedding_cycle_three_stage_classifier2/'

    # VC checkpoint to load; alternatives kept for other training runs.
    # model_path="/home/wang/codes/py/VC/model_checkpoints/best_checkpoints/model.ckpt-450.pt"
    # model_path="/home/wang/codes/py/VC/model_checkpoints/Hubert_SpeakerEmbedding_cycle_three_stage_checkpoints/useCSMITrue_useCPMITrue_usePSMITrue_useAmpFalse/model.ckpt-500.pt"
    model_path="/home/wang/codes/py/VC/model_checkpoints/Hubert_SpeakerEmbedding_cycle_three_stage_classifier2/useCSMITrue_useCPMITrue_usePSMITrue_useAmpFalse/model.ckpt-450.pt"
    # NOTE: when switching datasets, also update the mel_stats path inside convert().
    # Convert the small demo sentence set.
    # convertedTest10(converted_wav_root_path, model_path)

    

    # Convert all sentences, single-threaded.
    # convertedAll(converted_wav_root_path, model_path)

    # Convert all sentences, multi-threaded (one thread per gender pair).
    convertedAllThread(converted_wav_root_path, model_path)
    
    


    
    