import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(40)
import os
import numpy as np
import pandas as pd
from scipy import signal
from scipy.io import wavfile
from sklearn.model_selection import train_test_split
from tensorflow.keras import optimizers, losses, activations, models
from tensorflow.keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization

def log_specgram(audio, sample_rate, window_size=20,
                 step_size=10, eps=1e-10):
    """Compute a log-scaled spectrogram of a 1-D audio signal.

    ``window_size`` and ``step_size`` are given in milliseconds; ``eps``
    keeps the logarithm finite where the power is zero.
    Returns ``(freqs, times, log_spec)`` where ``log_spec`` is float32
    with time as the leading axis, i.e. shape ``(n_times, n_freqs)``.
    """
    # Convert the millisecond window/overlap sizes into sample counts.
    samples_per_window = int(round(window_size * sample_rate / 1e3))
    overlap_samples = int(round(step_size * sample_rate / 1e3))
    freqs, times, spec = signal.spectrogram(
        audio,
        fs=sample_rate,
        window='hann',
        nperseg=samples_per_window,
        noverlap=overlap_samples,
        detrend=False,
    )
    # Transpose so rows are time steps, then log-compress the power.
    log_spec = np.log(spec.T.astype(np.float32) + eps)
    return freqs, times, log_spec

def pad_audio(samples, L=16000):
    """Left-pad ``samples`` with zeros up to length ``L``.

    ``L`` was previously read from an undeclared module-level global; it is
    now an explicit parameter (default 16000, i.e. 1 s at 16 kHz) so the
    function is self-contained. Inputs already at least ``L`` long are
    returned unchanged.
    """
    if len(samples) >= L:
        return samples
    # Pad on the left only, keeping the signal right-aligned.
    return np.pad(samples, pad_width=(L - len(samples), 0),
                  mode='constant', constant_values=(0, 0))

def chop_audio(samples, L=16000, num=20):
    """Yield ``num`` random windows of length ``L`` from ``samples``.

    Fixes an off-by-one: ``np.random.randint(0, len(samples) - L)`` raised
    ``ValueError`` when ``len(samples) == L`` and could never select the
    final window. The high bound is now inclusive of ``len(samples) - L``.
    """
    high = len(samples) - L + 1  # allow beg == len(samples) - L
    for _ in range(num):
        beg = np.random.randint(0, high)
        yield samples[beg: beg + L]

def label_transform(labels, legal_labels=('left', 'right', 'stop')):
    """Map raw folder labels to training classes and one-hot encode them.

    '_background_noise_' is renamed to 'silence'; any label outside
    ``legal_labels`` becomes 'unknown'. ``legal_labels`` was previously read
    from a module-level global; it is now a parameter whose default matches
    the script's ``'left right stop'.split()``, so callers are unchanged.
    Returns a pandas DataFrame of dummy (one-hot) columns.
    """
    nlabels = []
    for label in labels:
        if label == '_background_noise_':
            nlabels.append('silence')
        elif label not in legal_labels:
            nlabels.append('unknown')
        else:
            nlabels.append(label)
    return pd.get_dummies(pd.Series(nlabels))

def model_cnn(x_train, y_train):
    """Build, train, and evaluate a small CNN on log-spectrogram inputs.

    Parameters:
        x_train: array of shape (N, H, W, 1) with log-spectrogram "images"
                 (the script's preprocessing produces (N, 99, 81, 1)).
        y_train: one-hot label array of shape (N, nclass).

    Splits the data 80/10/10 into train/validation/test, trains for 3
    epochs, prints the test score, and returns the trained Keras model.
    """
    # Derive the input shape and class count from the data instead of
    # hard-coding (99, 81, 1) and 3, so the model tracks any change to the
    # preprocessing or the label set.
    input_shape = x_train.shape[1:]
    nclass = y_train.shape[1]
    inp = Input(shape=input_shape)
    norm_inp = BatchNormalization()(inp)
    img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(norm_inp)
    img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Flatten()(img_1)

    dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(img_1))
    dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(dense_1))
    dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam()

    # Mutually exclusive one-hot targets under a softmax output: use
    # categorical cross-entropy. The original binary_crossentropy treats
    # each output unit independently, which mis-measures the loss and
    # inflates the reported accuracy for multi-class problems.
    model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['accuracy'])
    model.summary()

    # 80% train, 10% validation, 10% test.
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2, random_state=2017)
    x_test, x_valid, y_test, y_valid = train_test_split(x_valid, y_valid, test_size=0.5, random_state=2017)
    model.fit(x_train, y_train, batch_size=16, validation_data=(x_valid, y_valid), epochs=3, shuffle=True, verbose=2)

    score = model.evaluate(x_test, y_test, verbose=2)
    print('测试集损失值:', score[0])
    print('测试集准确率:', score[1])

    return model


if __name__ == "__main__":
    # Target clip length in samples (1 s at 16 kHz) and the command words
    # to classify; everything else becomes 'silence' or 'unknown'.
    L = 16000
    legal_labels = 'left right stop'.split()

    data_path = r'E:\DATA\direction_data_3rd\语音命令数据'

    new_sample_rate = 8000
    y_data = []
    x_data = []

    # Directory layout: data_path/<label>/<wav files>.
    for label in os.listdir(data_path):
        label_path = os.path.join(data_path, label)
        for voice_name in os.listdir(label_path):
            sample_rate, samples = wavfile.read(os.path.join(label_path, voice_name))
            # Keep only the first channel. The original unconditional
            # samples[:, 0] crashed on mono files, which wavfile.read
            # returns as 1-D arrays.
            if samples.ndim > 1:
                samples = samples[:, 0]
            samples = pad_audio(samples)
            # pad_audio guarantees len(samples) >= L; clips longer than L
            # are chopped into random L-sample windows (compare against L,
            # not a hard-coded 16000, so the two stay in sync).
            if len(samples) > L:
                n_samples = chop_audio(samples)
            else:
                n_samples = [samples]
            for samples in n_samples:
                # Downsample to 8 kHz before the spectrogram so every
                # example yields the 99x81 shape the CNN expects.
                resampled = signal.resample(samples, int(new_sample_rate / sample_rate * samples.shape[0]))
                _, _, specgram = log_specgram(resampled, sample_rate=new_sample_rate)
                y_data.append(label)
                x_data.append(np.array(specgram))

    x_data = np.array(x_data)
    # Add a trailing channel axis for Conv2D: (N, 99, 81) -> (N, 99, 81, 1).
    x_data = x_data[..., np.newaxis]
    y_data = label_transform(y_data)
    label_index = y_data.columns.values
    y_data = np.array(y_data.values)

    model = model_cnn(x_data, y_data)