#!/usr/bin/python
# -*- encoding:utf-8 -*-
import os
import wave
import pickle
import librosa
import numpy as np
from utils.common import eps, split_in_seqs, split_multi_channels


def extract_fbank(pcm_data, sample_rate, num_frame_len_fft, num_frame_shift_fft, num_mel_bands, power,
                  log_spec=False, mfcc=False, num_mfccs=40):
    '''Extract mel filterbank (fbank) features from a raw waveform.

    Args:
        pcm_data: 1-D waveform samples (float array).
        sample_rate: sampling rate in Hz.
        num_frame_len_fft: frame length in samples (used as the FFT size).
        num_frame_shift_fft: frame shift (hop length) in samples.
        num_mel_bands: number of mel filterbank channels (fbank dimension).
        power: 1 for energy (magnitude) spectrum, 2 for power spectrum.
        log_spec: if True, take the plain log of the magnitude spectrum
            instead of applying perceptual weighting. (Default False,
            matching the previous hard-coded behavior.)
        mfcc: if True, additionally convert the mel spectrum to MFCCs.
            (Default False, matching the previous hard-coded behavior.)
        num_mfccs: number of MFCC coefficients used when ``mfcc`` is True.

    Returns:
        np.ndarray of shape [num_mel_bands, frames], or
        [num_mfccs, frames] when ``mfcc`` is True.

    Notes:
        Unlike typical ASR pipelines, no explicit framing step is needed:
        librosa's stft defaults win_length to n_fft, so no extra zero
        padding is required either.
        Learn from: https://cpjku.github.io/dcase_task2/#audio-data-pre-processing
        Understand: https://stackoverflow.com/questions/3694918/how-to-extract-frequency-associated-with-fft-values-in-python
    '''
    # eps is added to avoid log(0)/weighting of exact zeros.
    stft = librosa.stft(pcm_data+eps, n_fft=num_frame_len_fft, hop_length=num_frame_shift_fft,
                        win_length=None, window='hann', center=True, pad_mode='reflect')
    # Magnitude spectrum, shape = [num_frame_len_fft/2 + 1, frames]
    stft = np.abs(stft)
    if log_spec:
        stft = np.log(stft + eps)
    else:
        # Center frequency of each FFT bin (like np.fft.fftfreq),
        # shape = [num_frame_len_fft/2 + 1,]
        freqs = librosa.core.fft_frequencies(sr=sample_rate, n_fft=num_frame_len_fft)
        # Perceptual (loudness) weighting of the spectrum per librosa;
        # intended to approximate human hearing sensitivity.
        stft = librosa.perceptual_weighting(stft**power, freqs+eps, ref=1.0, amin=1e-10, top_db=99.0)

    # Project onto the mel filterbank, shape = [num_mel_bands, frames]
    mel_spectrum = librosa.feature.melspectrogram(S=stft, sr=sample_rate, n_mels=num_mel_bands)

    if mfcc:
        mel_spectrum = librosa.feature.mfcc(S=librosa.power_to_db(mel_spectrum), n_mfcc=num_mfccs)
    return mel_spectrum


def load_audio(filename, mono=True, fs=44100):
    """Load an audio file into a numpy array. Supports 24-bit wav format.

    Adapted from the TUT-SED system:
    https://github.com/TUT-ARG/DCASE2016-baseline-system-python

    Parameters
    ----------
    filename : str
        Path to the audio file.
    mono : bool
        When the file has several channels, average them into one.
        (Default value=True)
    fs : int > 0 [scalar]
        Target sample rate; the signal is resampled if the file's rate
        differs. (Default value=44100)

    Returns
    -------
    numpy.ndarray or None
        Float samples scaled to approximately [-1, 1); ``None`` when the
        file extension is not ``.wav``.
    """
    _, extension = os.path.splitext(filename)
    if extension != '.wav':
        return None

    reader = wave.open(filename)

    # Stream properties
    sample_rate = reader.getframerate()
    sample_width = reader.getsampwidth()
    n_channels = reader.getnchannels()
    n_frames = reader.getnframes()

    # Pull the raw PCM bytes
    raw = reader.readframes(n_frames)
    reader.close()

    # Sanity-check the byte count against the declared sample layout
    n_samples, leftover = divmod(len(raw), sample_width * n_channels)
    if leftover > 0:
        raise ValueError('The length of data is not a multiple of sample size * number of channels.')
    if sample_width > 4:
        raise ValueError('Sample size cannot be bigger than 4 bytes.')

    if sample_width == 3:
        # 24-bit audio: widen each 3-byte sample to 4 bytes by
        # sign-extending the top byte, then reinterpret as int32.
        widened = np.empty((n_samples, n_channels, 4), dtype=np.uint8)
        byte_view = np.frombuffer(raw, dtype=np.uint8)
        widened[:, :, :sample_width] = byte_view.reshape(-1, n_channels, sample_width)
        widened[:, :, sample_width:] = (widened[:, :, sample_width - 1:sample_width] >> 7) * 255
        audio_data = widened.view('<i4').reshape(widened.shape[:-1]).T
    else:
        # 8-bit samples are stored as unsigned ints; others as signed ints.
        type_char = 'u' if sample_width == 1 else 'i'
        samples = np.frombuffer(raw, dtype='<%s%d' % (type_char, sample_width))
        audio_data = samples.reshape(-1, n_channels).T

    if mono:
        # Down-mix multi-channel audio by averaging across channels
        audio_data = np.mean(audio_data, axis=0)

    # Convert integer samples into floats
    audio_data = audio_data / float(2 ** (sample_width * 8 - 1) + 1)

    # Resample when the file's rate differs from the requested one
    if fs != sample_rate:
        audio_data = librosa.core.resample(audio_data, sample_rate, fs)
        sample_rate = fs

    return audio_data


def extract_delta(X):
    '''Append delta and acceleration features along the channel axis.

    X shape: [N, 1, time, f_dim]; returns shape [N, 3, time, f_dim],
    where the extra channels are first- and second-order deltas computed
    over the last axis with a window width of 9.
    '''
    first_order = np.empty_like(X)
    second_order = np.empty_like(X)
    for idx, sample in enumerate(X):
        first_order[idx, 0] = librosa.feature.delta(sample[0], width=9, order=1, axis=-1)
        second_order[idx, 0] = librosa.feature.delta(sample[0], width=9, order=2, axis=-1)
    return np.concatenate((X, first_order, second_order), axis=1)


def load_train_data(feat_dir, chunk_size, nb_ch, is_delta):
    '''Load pickled train/dev splits and prepare them for training.

    Reads ``train.pickle`` under *feat_dir*, which holds X_train, X_dev,
    Y_train, Y_dev pickled in that order, chunks and channel-splits them,
    and optionally appends delta features to the dev features.
    '''
    pickle_path = os.path.join(feat_dir, 'train.pickle')
    with open(pickle_path, 'rb') as handle:
        train_x = pickle.load(handle)
        dev_x = pickle.load(handle)
        train_y = pickle.load(handle)
        dev_y = pickle.load(handle)

    # Split everything into fixed-size chunks (and features into channels)
    train_x, train_y, dev_x, dev_y = preprocess_data(
        train_x, train_y, dev_x, dev_y, chunk_size, nb_ch)

    # Delta features are only attached to the dev split here; the training
    # split is presumably handled elsewhere by the caller — verify.
    if is_delta:
        dev_x = extract_delta(dev_x)

    return train_x, train_y, dev_x, dev_y


def load_test_data(feat_dir):
    '''Load pickled test features and labels.

    Expects ``test.pickle`` under *feat_dir*, containing X_test followed
    by Y_test. Returns the pair (X_test, Y_test).
    '''
    pickle_path = os.path.join(feat_dir, 'test.pickle')
    with open(pickle_path, 'rb') as handle:
        features = pickle.load(handle)
        labels = pickle.load(handle)
    return features, labels


def preprocess_data(_X, _Y, X_dev, Y_dev, _chunk_size, _nb_ch):
    '''Chunk train/dev arrays into sequences and split features by channel.

    Both feature and label arrays are cut into sequences of length
    ``_chunk_size``; the feature arrays are additionally split into
    ``_nb_ch`` channels. Labels keep their chunked shape.
    '''
    chunked = [split_in_seqs(arr, _chunk_size) for arr in (_X, _Y, X_dev, Y_dev)]
    train_x, train_y, dev_x, dev_y = chunked

    train_x = split_multi_channels(train_x, _nb_ch)
    dev_x = split_multi_channels(dev_x, _nb_ch)

    return train_x, train_y, dev_x, dev_y
