import torch
import torch
import numpy as np
import soundfile as sf

# from model.model_encoder import Encoder, Encoder_lf0
# from model.model_decoder import Decoder_ac
# # from model.model_encoder import SpeakerEncoder as Encoder_spk
# from model.model_decoder_attention_speaker import SpeakerEncoder as Encoder_spk
# import os
from model.model_encoder_attention_speaker import SpeakerEncoder as Encoder_spk
# import subprocess
from utils.spectrogram import logmelspectrogram
import kaldiio

import resampy
import pyworld as pw

import argparse
import torch.nn.functional as F


def extract_logmel(wav_path, mean, std, sr=16000):
    """Read a wav file and return its mean/std-normalized log-mel spectrogram.

    Args:
        wav_path: path to the input wav file.
        mean: per-bin mean used to normalize the mel spectrogram.
        std: per-bin standard deviation used to normalize the mel spectrogram.
        sr: target sampling rate; audio at a different rate is resampled.

    Returns:
        Normalized log-mel spectrogram of shape (frames, 80).
    """
    audio, rate = sf.read(wav_path)
    # Resample to the target rate when the file's native rate differs.
    if rate != sr:
        audio = resampy.resample(audio, rate, sr, axis=0)
        rate = sr
    assert rate == 16000
    # Rescale only if the signal clips beyond [-1, 1].
    amplitude = np.abs(audio).max()
    if amplitude > 1.0:
        audio /= amplitude
    features = logmelspectrogram(
        x=audio,
        fs=rate,
        n_mels=80,
        n_fft=400,
        n_shift=160,
        win_length=400,
        window='hann',
        fmin=80,
        fmax=7600,
    )
    # Normalize per mel bin; epsilon guards against zero std.
    return (features - mean) / (std + 1e-8)

def load_mean_std(stats_path='/home/wang/codes/py/VC/VCTK/mel_stats/stats.npy'):
    """Load mel-spectrogram normalization statistics from a .npy file.

    Args:
        stats_path: path to a numpy file whose first row is the per-bin mean
            and second row is the per-bin standard deviation. Defaults to the
            original hard-coded VCTK stats location for backward compatibility.

    Returns:
        Tuple ``(mean, std)`` of numpy arrays.
    """
    mel_stats = np.load(stats_path)
    mean = mel_stats[0]
    std = mel_stats[1]
    return mean, std

def MCD_loss(convert_wav_path, target_wav_path, mean, std):
    """Mean-squared error between the log-mel spectrograms of two wav files.

    NOTE(review): despite the name, this computes an MSE on normalized
    log-mel features, not a true mel-cepstral distortion.

    Args:
        convert_wav_path: path to the converted (generated) wav file.
        target_wav_path: path to the reference wav file.
        mean: per-bin mean for mel normalization.
        std: per-bin standard deviation for mel normalization.

    Returns:
        Scalar torch tensor with the MSE over the overlapping frames.
    """
    converted = extract_logmel(convert_wav_path, mean, std, sr=16000)
    reference = extract_logmel(target_wav_path, mean, std, sr=16000)
    # The two utterances rarely have equal length; compare the common prefix.
    frames = min(converted.shape[0], reference.shape[0])
    converted_t = torch.tensor(converted[:frames, :])
    reference_t = torch.tensor(reference[:frames, :])
    return F.mse_loss(converted_t, reference_t)

if __name__ == "__main__":
    mean, std = load_mean_std()

    # VCTK dataset utterances: compare a converted wav against its source.
    convert_wav_path = "/home/wang/codes/py/VC/VQMIVC/converted/VCTK_unseen_p228_001_mic1_to_VCTK_unseen_p228_002_mic1_converted_gen.wav"
    target_wav_path = "/home/wang/codes/py/VC/VQMIVC/test_wav/VCTK/VCTK_unseen_p228_001_mic1.wav"

    print(MCD_loss(convert_wav_path, target_wav_path, mean, std))