import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(40)

from tensorflow.keras.losses import SparseCategoricalCrossentropy
from sklearn.model_selection import train_test_split
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt
import random
from tensorflow.keras import optimizers, activations, models
from tensorflow.keras.layers import Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization,Conv2D
from sklearn.metrics import confusion_matrix
import seaborn as sns

def extract_features(path):
    """Walk a directory of per-class subfolders and compute MFCC features.

    ``path`` is expected to contain one subdirectory per class; every audio
    file inside a subdirectory is loaded and converted to an MFCC matrix.

    Parameters
    ----------
    path : str
        Root directory; each immediate subdirectory is one class.

    Returns
    -------
    tuple of np.ndarray
        ``(x_data, y_data, path_list)`` — per-file MFCC arrays of shape
        (frames, 100, 1), integer class labels (the subfolder's position in
        ``os.listdir(path)`` order, matching ``classes_name`` lookups in the
        caller), and the corresponding file paths.
    """
    x_data = []
    y_data = []
    path_list = []
    for label_idx, label_name in enumerate(os.listdir(path)):
        label_path = os.path.join(path, label_name)
        # Skip stray files at the top level — only directories are classes.
        # The enumerate index is kept, so labels still match the position
        # in os.listdir(path) used by the caller's classes_name list.
        if not os.path.isdir(label_path):
            continue
        for voice_name in os.listdir(label_path):
            voice_path = os.path.join(label_path, voice_name)
            path_list.append(voice_path)

            # sr=None keeps each file's native sampling rate (no resampling,
            # so res_type is effectively inert here).
            x, sr = librosa.load(voice_path, sr=None, res_type='kaiser_fast')
            # Keyword arguments are required by librosa >= 0.10 (positional
            # y/sr were removed). The transpose yields (frames, n_mfcc) and
            # the trailing axis makes each sample a single-channel "image".
            mfccs = np.expand_dims(
                librosa.feature.mfcc(y=x, sr=sr, n_mfcc=100).T, axis=2)
            x_data.append(mfccs)

            y_data.append(label_idx)
    # NOTE(review): np.array over clips of differing frame counts produces an
    # object array that model.fit cannot consume — assumes all recordings
    # have equal length; confirm against the dataset.
    return np.array(x_data), np.array(y_data), np.array(path_list)


def model_cnn(input_shape, nclass=3):
    """Build a small 2-D CNN classifier over MFCC "images".

    Parameters
    ----------
    input_shape : tuple
        Shape of a single sample, e.g. (frames, n_mfcc, 1).
    nclass : int, optional
        Number of output classes. Defaults to 3 (the original hard-coded
        value), so existing callers are unaffected.

    Returns
    -------
    tf.keras.Model
        Uncompiled model mapping an ``input_shape`` tensor to a softmax
        distribution over ``nclass`` classes.
    """
    inp = Input(shape=input_shape)
    # Normalize raw MFCC magnitudes before the first convolution.
    norm_inp = BatchNormalization()(inp)

    # Block 1: two 2x2 convs -> pool -> dropout.
    img_1 = Conv2D(8, kernel_size=2, activation=activations.relu)(norm_inp)
    img_1 = Conv2D(8, kernel_size=2, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    # Block 2: two 3x3 convs -> pool -> dropout.
    img_1 = Conv2D(16, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = Conv2D(16, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    # Block 3: one 3x3 conv -> pool -> dropout, then flatten for the head.
    img_1 = Conv2D(32, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Flatten()(img_1)

    # Dense head: two batch-normalized 128-unit layers, softmax output.
    dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(img_1))
    dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(dense_1))
    dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)

    return model


if __name__ == '__main__':
    # Dataset root: one subdirectory per voice-command class.
    path = r'E:\DATA\direction_data_3rd\语音命令数据'
    classes_name = os.listdir(path)
    x_data, y_data, path_list = extract_features(path)
    print(x_data.shape)

    # 80% train; the remaining 20% is split evenly into validation and test.
    x_train, x_test, y_train, y_test, path_train, path_test = train_test_split(
        x_data, y_data, path_list, train_size=0.8)
    x_val, x_test, y_val, y_test, path_val, path_test = train_test_split(
        x_test, y_test, path_test, train_size=0.5)

    model = model_cnn(x_data.shape[1:])

    opt = optimizers.Adam()

    # Labels are integer class indices -> sparse categorical cross-entropy.
    model.compile(optimizer=opt, loss=SparseCategoricalCrossentropy(),
                  metrics=['accuracy'])
    model.summary()

    history = model.fit(x_train, y_train, batch_size=16,
                        validation_data=(x_val, y_val), epochs=30, shuffle=True)

    # Training/validation loss curves.
    plt.plot(history.history['loss'], 'ro-', label='train')
    plt.plot(history.history['val_loss'], 'go--', label='val')
    plt.legend()
    plt.title('loss')
    plt.show()

    # Training/validation accuracy curves.
    plt.plot(history.history['accuracy'], 'ro-', label='train')
    plt.plot(history.history['val_accuracy'], 'go--', label='val')
    plt.legend()
    plt.title('accuracy')
    plt.show()

    score = model.evaluate(x_test, y_test, verbose=0)
    print(f'测试集损失值:{score[0]:.3f}')
    print(f'测试集准确率:{score[1]:.3f}')

    # Keyword arguments for pie: the original positional call relied on
    # explode/labels being the 2nd/3rd parameters, which is fragile.
    plt.pie([score[1], 1 - score[1]], explode=[0, 0.02],
            labels=['true', 'false'], autopct='%1.1f%%')
    plt.show()

    pred_test = np.argmax(model.predict(x_test), 1)
    confusion_matrix_test = confusion_matrix(y_test, pred_test)
    sns.heatmap(confusion_matrix_test, annot=True)
    plt.show()

    # Show 9 random test clips; title is the predicted label, red when wrong.
    plt.figure(figsize=(15, 6))
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        r = random.randint(0, len(x_test) - 1)
        # np.argmax (not tf.argmax) for consistency with the confusion
        # matrix above, and int() so the result safely indexes a Python
        # list — the original indexed classes_name with an eager tensor.
        pred_idx = int(np.argmax(model.predict(x_test[r:r + 1]), axis=1)[0])
        pred_label = classes_name[pred_idx]
        true_label = classes_name[y_test[r]]
        x, sr = librosa.load(path_test[r])
        # NOTE(review): waveplot was removed in librosa 0.10 (renamed to
        # waveshow) — confirm the installed librosa version still ships it.
        librosa.display.waveplot(x, sr)
        plt.title(pred_label, c='k' if pred_label == true_label else 'r')
    plt.show()