import torch
import numpy as np

import soundfile as sf

# from model.model_encoder import Encoder, Encoder_lf0
from model.model_encoder_Hubert_decoder_add import Encoder, Encoder_lf0
from model.model_decoder_Hubert_add import Decoder_ac
# from model.model_encoder import SpeakerEncoder as Encoder_spk
# from model.model_encoder_randn_attention_speaker_no_batch import SpeakerEncoder as Encoder_spk
# from model.model_encoder_randn_attention_speaker import SpeakerEncoder as Encoder_spk
from model.model_encoder_Hubert_decoder_add import SpeakerEncoder as Encoder_spk
# from model.model_encoder_k_attention_speaker_30_AIshell import SpeakerEncoder as Encoder_spk

import os

import subprocess
from utils.spectrogram import logmelspectrogram
import kaldiio
# from fairseq.models.wav2vec import Wav2Vec2Model
import resampy
import pyworld as pw
import fairseq
import argparse

# Module-level setup: pick the compute device and load the pretrained HuBERT
# model once at import time. `device` and `model` are read as globals by
# extract_logmel() below.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Hard-coded path to the HuBERT-Large (LibriLight 60k) checkpoint.
ckpt_path = "/opt/data/private/VC/model_checkpoints/hubert_large_ll60k.pt"
models, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
model = models[0]
# Inference only: disable dropout/batch-norm updates and move to the device.
model.eval()
model=model.to(device)


def extract_logmel(wav_path, mean, std, mood, sr=16000):
    """Extract a per-utterance-normalized log-mel spectrogram and normalized
    log-F0 from a wav file; optionally also HuBERT features.

    Args:
        wav_path: path to the input waveform file.
        mean, std: global mel statistics. NOTE(review): currently unused —
            normalization below is per-utterance; confirm this is intended.
        mood: "Hubert" to additionally extract HuBERT features via the
            module-level ``model``; any other value skips them.
        sr: target sample rate; the input is resampled to it when needed.

    Returns:
        ``(mel, lf0)`` normally, or ``(mel, lf0, feat)`` when
        ``mood == "Hubert"``.
    """
    wav, fs = sf.read(wav_path)
    print(fs)
    if fs != sr:
        wav = resampy.resample(wav, fs, sr, axis=0)
        fs = sr
    assert fs == 16000
    # Normalize the peak amplitude down to <= 1.0 to avoid clipping.
    peak = np.abs(wav).max()
    if peak > 1.0:
        wav /= peak
    mel = logmelspectrogram(
        x=wav,
        fs=fs,
        n_mels=80,
        n_fft=400,
        n_shift=160,       # 10 ms hop at 16 kHz
        win_length=400,    # 25 ms window
        window='hann',
        fmin=80,
        fmax=7600,
    )
    if mood == "Hubert":
        # Extract HuBERT features with the model loaded at module level.
        with torch.no_grad():
            feat = model.extract_features(
                torch.tensor(np.array([wav]), dtype=torch.float).to(device))[0]
            print("feat", feat.shape)
            feat = feat.detach().cpu().squeeze(0)
            # Per-utterance normalization of the HuBERT features.
            f_mean = torch.mean(feat, axis=0).reshape(1, -1)
            f_std = torch.std(feat, axis=0).reshape(1, -1)
            feat = (feat - f_mean) / (f_std + 1e-8)

    # Per-utterance mel normalization (the global `mean`/`std` arguments are
    # deliberately not applied here).
    print("mel", mel.shape)
    m_mean = np.mean(mel, axis=1).reshape(-1, 1)
    m_std = np.std(mel, axis=1).reshape(-1, 1)
    mel = (mel - m_mean) / (m_std + 1e-8)

    tlen = mel.shape[0]
    frame_period = 160 / fs * 1000  # hop length in milliseconds
    # F0 extraction with WORLD: dio estimate refined by stonemask.
    f0, timeaxis = pw.dio(wav.astype('float64'), fs, frame_period=frame_period)
    f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, fs)
    f0 = f0[:tlen].reshape(-1).astype('float32')
    nonzeros_indices = np.nonzero(f0)
    lf0 = f0.copy()
    lf0[nonzeros_indices] = np.log(f0[nonzeros_indices])  # for f0(Hz), lf0 > 0 when f0 != 0
    # Normalize voiced log-F0 frames only. Distinct names are used so the
    # (unused) `mean`/`std` parameters are not shadowed.
    lf0_mean = np.mean(lf0[nonzeros_indices])
    lf0_std = np.std(lf0[nonzeros_indices])
    lf0[nonzeros_indices] = (lf0[nonzeros_indices] - lf0_mean) / (lf0_std + 1e-8)

    if mood == "Hubert":
        print("mel", mel.shape)
        print("feat", feat.shape)
        return mel, lf0, feat

    return mel, lf0


def convert(source_wav, reference_wav, converted_wav_path, mood, model_path):
    """Convert `source_wav` to the voice of `reference_wav` and synthesize
    the result as a waveform in `converted_wav_path`.

    Args:
        source_wav: path to the source (content) utterance.
        reference_wav: path to the reference (target-speaker) utterance.
        converted_wav_path: output directory; created if missing.
        mood: "VQMIVC" (mel-based content encoder) or "Hubert"
            (HuBERT-feature content encoder).
        model_path: checkpoint containing "encoder", "encoder_spk" and
            "decoder" state dicts.

    Raises:
        ValueError: if `mood` is not one of the two supported modes
            (previously this crashed later with UnboundLocalError).
    """
    src_wav_path = source_wav
    ref_wav_path = reference_wav
    out_dir = converted_wav_path
    print(out_dir)
    os.makedirs(out_dir, exist_ok=True)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build the model components and move them to the device.
    encoder = Encoder(in_channels=80, channels=512, n_embeddings=512, z_dim=64, c_dim=256)
    encoder_lf0 = Encoder_lf0()
    encoder_spk = Encoder_spk()
    decoder = Decoder_ac(dim_neck=64)
    encoder.to(device)
    encoder_lf0.to(device)
    encoder_spk.to(device)
    decoder.to(device)

    # Restore weights onto CPU storage; modules were already moved above.
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    encoder.load_state_dict(checkpoint["encoder"])
    encoder_spk.load_state_dict(checkpoint["encoder_spk"])
    decoder.load_state_dict(checkpoint["decoder"])

    encoder.eval()
    encoder_spk.eval()
    decoder.eval()
    # NOTE(review): encoder_lf0 is neither restored from the checkpoint nor
    # set to eval() — confirm this is intentional (it may be parameter-free).

    # Global mel statistics; forwarded to extract_logmel (which currently
    # normalizes per-utterance instead).
    mel_stats = np.load('/opt/data/private/VC/VCTK/Hubert/Hubert_mel_stats.npy')
    mean = mel_stats[0]
    std = mel_stats[1]
    feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=str(out_dir)+'/feats.1'))
    if mood == "VQMIVC":
        src_mel, src_lf0 = extract_logmel(src_wav_path, mean, std, mood)
        ref_mel, _ = extract_logmel(ref_wav_path, mean, std, mood)
    elif mood == "Hubert":
        src_mel, src_lf0, src_Hubert = extract_logmel(src_wav_path, mean, std, mood)
        ref_mel, _, ref_Hubert = extract_logmel(ref_wav_path, mean, std, mood)
        src_Hubert = torch.FloatTensor(src_Hubert.T).unsqueeze(0).to(device)
    else:
        raise ValueError(f"unknown mood: {mood!r} (expected 'VQMIVC' or 'Hubert')")
    src_mel = torch.FloatTensor(src_mel.T).unsqueeze(0).to(device)
    src_lf0 = torch.FloatTensor(src_lf0).unsqueeze(0).to(device)
    ref_mel = torch.FloatTensor(ref_mel.T).unsqueeze(0).to(device)
    src_filename = os.path.basename(src_wav_path).split('.')[0]
    ref_filename = os.path.basename(ref_wav_path).split('.')[0]
    with torch.no_grad():
        # Content embedding comes from the mel (VQMIVC) or HuBERT features.
        if mood == "VQMIVC":
            z, _, _, _ = encoder.encode(src_mel)
        else:  # "Hubert" (validated above)
            z = encoder(src_Hubert)
        lf0_embs = encoder_lf0(src_lf0)
        spk_emb = encoder_spk(ref_mel)
        output = decoder(z, lf0_embs, spk_emb)

        # Write converted / source / reference mels for the vocoder.
        key = src_filename + '_to_' + ref_filename
        feat_writer[key + '_converted'] = output.squeeze(0).cpu().numpy()
        feat_writer[key + '_source'] = src_mel.squeeze(0).cpu().numpy().T
        feat_writer[key + '_reference'] = ref_mel.squeeze(0).cpu().numpy().T

    feat_writer.close()
    print('synthesize waveform...')
    # Vocode every entry in the scp with Parallel WaveGAN (list argv,
    # shell=False semantics via subprocess.call with a list).
    cmd = ['parallel-wavegan-decode', '--checkpoint', \
           '/opt/data/private/VC/vocoder/vocoder/vocoder/checkpoint-3000000steps.pkl', \
           '--scp', f"{str(out_dir)}/feats.1.scp", '--outdir', str(out_dir)]
    subprocess.call(cmd)

if __name__ == "__main__":
    # Hard-coded evaluation pairs (unseen validation speakers): each row is
    # [source_wav, reference_wav]; any extra entries in a row are ignored.
    conversion_pairs = [
        ["F1_book_05448_chp_0038_reader_06708_1.wav", "M1_book_11346_chp_0030_reader_00812_9.wav"],
        ["F2_clean_fileid_9371.wav", "M2_clean_fileid_11248.wav"],
        ["M1_book_11346_chp_0030_reader_00812_9.wav", "F1_book_05448_chp_0038_reader_06708_1.wav"],
        ["M2_clean_fileid_11248.wav", "F2_clean_fileid_9371.wav"],
        ["F1_book_05448_chp_0038_reader_06708_1.wav", "F2_clean_fileid_9371.wav", "M2_clean_fileid_11248.wav"],
        ["M1_book_11346_chp_0030_reader_00812_9.wav", "M2_clean_fileid_11248.wav"],

        ["F1_VCTK_unseen_p228_001_mic1.wav", "M1_VCTK_p376_006_mic1.wav"],
        ["F2_VCTK_unseen_p253_008_mic1.wav", "M2_VCTK_unseen_p263_007_mic1.wav"],
        ["M1_VCTK_p376_006_mic1.wav", "F1_VCTK_unseen_p228_001_mic1.wav"],
        ["M2_VCTK_unseen_p263_007_mic1.wav", "F2_VCTK_unseen_p253_008_mic1.wav"],
        ["F1_VCTK_unseen_p228_001_mic1.wav", "F2_VCTK_unseen_p253_008_mic1.wav"],
        ["M1_VCTK_p376_006_mic1.wav", "M2_VCTK_unseen_p263_007_mic1.wav"],
    ]

    # Fixed paths for this experiment run.
    root = "/opt/data/private/VC/git/test_wav/all_wav"
    converted_wav_path = '/opt/data/private/VC/git/converted/decoder_add_content_speaker'
    model_path = "/opt/data/private/VC/model_checkpoints/Hubert_Decoder_add_checkpoints/useCSMITrue_useCPMITrue_usePSMITrue_useAmpFalse/model.ckpt-500.pt"
    mood = "Hubert"

    # Convert every source/reference pair in turn.
    for src_name, ref_name, *_ in conversion_pairs:
        convert(
            os.path.join(root, src_name),
            os.path.join(root, ref_name),
            converted_wav_path,
            mood,
            model_path,
        )