import numpy as np
from scipy.io import wavfile
from numpy.matlib import repmat


def wav_to_mfcc(type):
    """Compute MFCC features for every utterance in the train or test set.

    Parameters
    ----------
    type : str
        Either "train" or "test"; selects which speaker directories to scan.
        (Name shadows the builtin but is kept for caller compatibility.)

    Returns
    -------
    list of dict or None
        One dict per wav file: {"features": <MFCC matrix>, "modelID": c}
        where c is the first character of the wav name (the spoken digit).
        Returns None for any other ``type`` value.
    """
    if type == "train":
        dir_names = ['AE', 'AJ', 'AL', 'AW', 'BD', 'CB', 'CF', 'CR', 'DL', 'DN', 'EH', 'EL',
                     'FC', 'FD', 'FF', 'FI', 'FJ', 'FK', 'FL', 'GG']
    elif type == "test":
        dir_names = ['AH','AR','AT','BC','BE','BM','BN','CC','CE','CP','DF','DJ','ED',
                          'EF','ET','FA','FG','FH','FM','FP','FR','FS','FT','GA','GP','GS',
                          'GW','HC','HJ','HM','HR','IA','IB','IM','IP','JA','JH','KA','KE',
                          'KG','LE','LG','MI','NL','NP','NT','PC','PG','PH','PR','RK','SA',
                          'SL','SR','SW','TC']
    else:
        print("ERROR INPUT")
        return None

    wav_names = ['1A_endpt.wav', '1B_endpt.wav', '2A_endpt.wav', '2B_endpt.wav', '3A_endpt.wav', '3B_endpt.wav',
                 '4A_endpt.wav', '4B_endpt.wav', '5A_endpt.wav', '5B_endpt.wav', '6A_endpt.wav', '6B_endpt.wav',
                 '7A_endpt.wav', '7B_endpt.wav', '8A_endpt.wav', '8B_endpt.wav', '9A_endpt.wav', '9B_endpt.wav',
                 'OA_endpt.wav', 'OB_endpt.wav', 'ZA_endpt.wav', 'ZB_endpt.wav']

    NFFT = 512   # FFT size used throughout the pipeline
    nfilt = 40   # number of mel filters

    training_data = []
    for dirname in dir_names:
        for wav_name in wav_names:
            name = "data/wav/" + dirname + "/" + wav_name
            # Compute once per file (the original ran the full MFCC
            # pipeline twice and threw the first result away).
            feats = MFCC(name, NFFT, nfilt)
            training_data.append({'features': feats, 'modelID': wav_name[0]})

        print(dirname + " MFCC DONE ")

    return training_data


def MFCC(filename, NFFT, nfilt):
    """Extract the full MFCC feature matrix for one wav file.

    Pipeline: pre-emphasis -> framing -> Hamming windowing ->
    short-time energy -> power spectrum -> mel filter bank ->
    log compression -> DCT -> delta features -> per-feature
    normalisation (transposed so rows are feature dimensions).

    Parameters
    ----------
    filename : str
        Path of the wav file to read.
    NFFT : int
        FFT size for the power spectrum.
    nfilt : int
        Number of mel filters / DCT size.
    """
    rate, samples = wavfile.read(filename)

    emphasized = pre_emphasis(samples)
    frame_len, frames = framing(rate, emphasized)
    windowed = windowing(frames, frame_len)

    frame_energy = shortEnergy(windowed)

    power_spec = stft(windowed, NFFT)
    fbank_energies = melFilterBank(rate, NFFT, power_spec)
    log_fbank = 20 * np.log10(fbank_energies)  # dB-scale compression

    cepstra = dct(nfilt, log_fbank)
    with_deltas = dynamicFeature(cepstra, frame_energy)

    return transformation(with_deltas)


def pre_emphasis(audio_sequence, alpha=0.98):
    """Apply a first-order pre-emphasis (high-pass FIR) filter.

    y[n] = x[n] - alpha * x[n-1], with y[0] = x[0].

    Fixes two defects in the original in-place loop: it read back
    already-filtered samples (turning the FIR filter into a recursive
    one), and for integer wav dtypes the float result was truncated on
    assignment. The input array is no longer mutated.

    Parameters
    ----------
    audio_sequence : array_like
        1-D audio signal.
    alpha : float, optional
        Pre-emphasis coefficient (default 0.98, as before).

    Returns
    -------
    numpy.ndarray
        Filtered signal as float64, same length as the input.
    """
    x = np.asarray(audio_sequence, dtype=float)
    if x.size == 0:
        return x
    return np.concatenate(([x[0]], x[1:] - alpha * x[:-1]))


def framing(sample_frequency, audio_sequence):
    """Split the signal into overlapping frames (25 ms window, 10 ms hop).

    Replaces the original element-by-element double Python loop (and the
    deprecated ``numpy.matlib.repmat`` index construction) with a single
    broadcast index computation and one vectorised gather — same output,
    O(1) Python-level work.

    Parameters
    ----------
    sample_frequency : int
        Sampling rate in Hz.
    audio_sequence : numpy.ndarray
        1-D signal.

    Returns
    -------
    (int, numpy.ndarray)
        Frame length in samples, and the frame matrix of shape
        (num_frames, frame_length) as float64.
    """
    signal_len = audio_sequence.shape[0]
    wlen = int(25 * sample_frequency / 1000)  # 25 ms window
    inc = int(10 * sample_frequency / 1000)   # 10 ms hop
    fn = int((signal_len - wlen) / inc + 1)   # number of full frames

    # Row i of `idx` holds the sample indices of frame i.
    idx = np.arange(max(fn, 0))[:, None] * inc + np.arange(wlen)[None, :]
    yseg = audio_sequence[idx].astype(float)
    return wlen, yseg


def windowing(yseg, wlen):
    """Multiply every frame (row of ``yseg``) by a Hamming window.

    The window is the classic 0.54 - 0.46*cos(2*pi*n/(wlen-1)) taper;
    broadcasting applies it to all frames at once.
    """
    hamming = np.array(
        [0.54 - 0.46 * np.cos(2 * np.pi * n / (wlen - 1)) for n in range(wlen)]
    )
    # Broadcasting multiplies each row by the window.
    return yseg * hamming


def stft(yseg_win, NFFT):
    """Per-frame power spectrum via an NFFT-point real FFT.

    Returns an array of shape (num_frames, NFFT//2 + 1) holding
    |FFT|^2 / NFFT for each frame.
    """
    spectrum = np.fft.rfft(yseg_win, NFFT)
    magnitude = np.abs(spectrum)
    return np.square(magnitude) / NFFT


def melFilterBank(sample_frequency, NFFT, pow_frames, nfilt=40):
    """Apply a triangular mel filter bank to the power spectrum.

    Parameters
    ----------
    sample_frequency : int
        Sampling rate in Hz.
    NFFT : int
        FFT size used to produce ``pow_frames``.
    pow_frames : numpy.ndarray
        Power spectrum, shape (num_frames, NFFT//2 + 1).
    nfilt : int, optional
        Number of mel filters (default 40; previously hard-coded, now a
        backward-compatible parameter).

    Returns
    -------
    numpy.ndarray
        Filter-bank energies, shape (num_frames, nfilt). Zeros are
        replaced with machine epsilon so a later log() is safe.
    """
    # NOTE(review): 300 is used directly as a mel value (not converted
    # from Hz) — kept as-is to preserve the original filter placement.
    low_freq_mel = 300
    high_freq_mel = (2595 * np.log10(1 + (sample_frequency / 2) / 700))

    # nfilt + 2 edge points, evenly spaced on the mel scale.
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)
    hz_points = (700 * (10 ** (mel_points / 2595) - 1))
    # FFT bin index of each filter edge ("bin" renamed: shadowed builtin).
    bin_idx = np.floor((NFFT + 1) * hz_points / sample_frequency)

    fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin_idx[m - 1])  # left edge
        f_m = int(bin_idx[m])            # peak
        f_m_plus = int(bin_idx[m + 1])   # right edge

        # Rising slope of triangle m.
        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin_idx[m - 1]) / (bin_idx[m] - bin_idx[m - 1])
        # Falling slope of triangle m.
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin_idx[m + 1] - k) / (bin_idx[m + 1] - bin_idx[m])

    filter_banks = np.dot(pow_frames, fbank.T)
    # Guard against log(0) downstream.
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)
    return filter_banks


def dct(nfilt, filter_banks, num_ceps=12):
    """Unnormalised type-II DCT of the log filter-bank energies.

    Keeps cepstral coefficients 1..num_ceps (coefficient 0, the overall
    level, is dropped). ``num_ceps`` was previously hard-coded to 12 and
    is now a backward-compatible parameter. The DCT matrix is built with
    broadcasting instead of a double loop.

    Parameters
    ----------
    nfilt : int
        Number of filter-bank channels (DCT size).
    filter_banks : numpy.ndarray
        Log filter-bank energies, shape (num_frames, nfilt).
    num_ceps : int, optional
        Number of cepstral coefficients to keep (default 12).

    Returns
    -------
    numpy.ndarray
        MFCC matrix of shape (num_frames, num_ceps).
    """
    k = np.arange(nfilt)[:, None]  # DCT coefficient index (rows)
    n = np.arange(nfilt)[None, :]  # filter-bank channel index (cols)
    dctcoef = np.cos((2 * n + 1) * k * np.pi / (2 * nfilt))

    # (dctcoef @ fb.T).T == fb @ dctcoef.T ; drop coefficient 0.
    mfcc = filter_banks.dot(dctcoef.T)[:, 1:num_ceps + 1]
    return mfcc

def shortEnergy(yseg_win):
    """Log short-time energy (dB) of each windowed frame.

    Sums the squared samples per frame; zero-energy frames are clamped
    to machine epsilon so the log is defined.
    """
    per_frame = (np.abs(yseg_win) ** 2).sum(axis=1)
    per_frame[per_frame == 0] = np.finfo(float).eps  # guard log(0)
    return 10 * np.log10(per_frame)

def dynamicFeature(mfcc, En):
    """Append energy plus first/second-order delta features.

    Output columns are concatenated in the original order:
    [mfcc | En | delta(mfcc) | delta(En) | delta2(mfcc) | delta2(En)].
    Deltas use the central difference (x[i+1] - x[i-1]) / 2 for frames
    2 .. N-3; the first and last two frames of each delta stay zero.
    The index loops are replaced by equivalent slice assignments.
    """
    N = mfcc.shape[0]

    dtm = np.zeros(mfcc.shape)
    dtm[2:N - 2] = (mfcc[3:N - 1] - mfcc[1:N - 3]) / 2

    dtmm = np.zeros(mfcc.shape)
    dtmm[2:N - 2] = (dtm[3:N - 1] - dtm[1:N - 3]) / 2

    En1 = np.zeros(N)
    En1[2:N - 2] = (En[3:N - 1] - En[1:N - 3]) / 2

    En2 = np.zeros(N)
    En2[2:N - 2] = (En1[3:N - 1] - En1[1:N - 3]) / 2

    # Column vectors so everything concatenates along axis 1.
    En_col = En.reshape((N, -1))
    En1_col = En1.reshape((N, -1))
    En2_col = En2.reshape((N, -1))

    return np.concatenate((mfcc, En_col, dtm, En1_col, dtmm, En2_col), axis=1)


def transformation(mfcc):
    """Cepstral mean/variance normalisation, then transpose.

    Each column (feature dimension) is normalised to zero mean and unit
    variance; the result is transposed so rows are feature dimensions
    and columns are frames.

    Fixes two defects: the original's ``trans_feature = mfcc`` was an
    alias (not a copy) that silently mutated the caller's array, and a
    constant column divided by a zero standard deviation, producing
    nan/inf. The input is no longer modified.

    Parameters
    ----------
    mfcc : numpy.ndarray
        Feature matrix, shape (num_frames, num_features).

    Returns
    -------
    numpy.ndarray
        Normalised features, shape (num_features, num_frames).
    """
    mean = mfcc.mean(axis=0)
    std = mfcc.std(axis=0)
    std = np.where(std == 0, np.finfo(float).eps, std)  # avoid divide-by-zero
    normalized = (mfcc - mean) / std
    return normalized.T



