import numpy as np
import os
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ReduceLROnPlateau
import pickle
import sklearn.metrics as sm

# Load the train and test datasets and return them with their label-name tables.
def create_datasets():
    """Load train/test wav features and integer class labels from disk.

    Each of the 7 class sub-directories is read from both the train root and
    the test root; every file is converted to a fixed-length feature vector by
    get_wav_mfcc().

    Returns:
        ((wavs, labels), (testwavs, testlabels), (labsInd, testlabsInd)) where
        wavs/testwavs are 2-D float arrays, labels/testlabels are integer
        class indices (0..6), and labsInd/testlabsInd map index -> tag name
        ("1".."7", in directory order).
    """
    train_root = r'D:/Pycharm/trainvoice5/train/'
    test_root = r'D:/Pycharm/trainvoice_dnntest/'

    # (tag, sub-directory) pairs, in the same order as the original code so the
    # resulting label indices (0..6) are identical.
    class_dirs = [
        ('1', 'abnormal_treble'),
        ('2', 'normal_bass'),
        ('3', 'normal_bass_inside_the_carriage'),
        ('4', 'normal_treble'),
        ('5', 'normal_treble_inside_the_carriage'),
        ('6', 'resonant_bass_inside_the_carriage'),
        ('7', 'resonant_treble_inside_the_carriage'),
    ]

    def _load_split(root):
        # Load every class directory under `root`; returns features, integer
        # labels, and the index->tag table for this split.
        feats = []
        labels = []
        names = []
        for tag, dirname in class_dirs:
            path = root + dirname + '/'
            for fname in os.listdir(path):
                print(fname)
                feats.append(get_wav_mfcc(path + fname))  # one feature vector per wav
                if tag not in names:
                    names.append(tag)
                labels.append(names.index(tag))
        return np.array(feats), np.array(labels), names

    wavs, labels, labsInd = _load_split(train_root)
    testwavs, testlabels, testlabsInd = _load_split(test_root)
    return (wavs, labels), (testwavs, testlabels), (labsInd, testlabsInd)

#############################################################################################################

def get_wav_mfcc(wav_path):
    """Load a wav file and return a fixed-length (300000,) non-negative vector.

    NOTE(review): despite the name, no MFCC is computed here — the function
    returns the absolute value of the peak-normalised raw waveform, centre-
    cropped or zero-padded to exactly 300000 samples.

    Parameters:
        wav_path: path to a wav file readable by librosa.

    Returns:
        numpy array of shape (300000,), values in [0, 1].
    """
    import librosa
    target_len = 300000

    wav, sr = librosa.load(wav_path, sr=16000)  # resampled to 16 kHz mono

    # Peak-normalise to [-1, 1]; guard against an all-silent file, whose peak
    # of 0 would otherwise produce NaNs (division by zero).
    peak = np.max(np.abs(wav)) if len(wav) else 0.0
    if peak > 0:
        wav = wav / peak

    # Trim symmetrically to the target length. The original code deleted one
    # sample from each end per loop iteration (quadratic on Python lists);
    # slicing removes ceil(excess/2) from each side in one O(n) step, matching
    # the original trimming semantics.
    excess = len(wav) - target_len
    if excess > 0:
        k = (excess + 1) // 2
        wav = wav[k:len(wav) - k]

    # Zero-pad at the end up to the target length (also covers the one-sample
    # shortfall left by an odd excess).
    if len(wav) < target_len:
        wav = np.concatenate([wav, np.zeros(target_len - len(wav), dtype=wav.dtype)])

    # abs(x) == (x**2)**0.5 from the original: keeps values in [0, 1].
    # The original also appended len(data) to a global list `l` that only
    # exists when the file runs as __main__ (NameError otherwise) and is never
    # read back — that side effect is removed.
    return np.abs(wav)

########################################################################################################################################################

if __name__ == '__main__':
    # Module-level scratch list; the original get_wav_mfcc() appends each raw
    # waveform length to it, but it is never read back anywhere.
    l=[]
    # Load train/test feature matrices and integer class labels (0..6).
    (wavs,labels),(testwavs,testlabels),(labsInd,testlabsInd) = create_datasets()
    # print("train set shape:", wavs.shape)
    # print("train labels shape:", labels.shape)
    # print("test set shape:", testwavs.shape)
    # print("test labels shape:", testlabels.shape)


    # One-hot encode the 7 class indices for categorical_crossentropy.
    labels = keras.utils.to_categorical(labels, 7)
    testlabels = keras.utils.to_categorical(testlabels, 7)

    # Build the model: a plain MLP over the raw 300000-sample feature vector.
    model = Sequential()
    model.add(Dense(128, activation='relu',input_shape=(300000,)))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(7, activation='softmax'))

    # Print a summary of the model (optional).
    model.summary()

    # Compile: cross-entropy loss, Adadelta optimizer, accuracy as the metric.
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])

    # Train with the test set as validation_data.
    # NOTE(review): ReduceLROnPlateau is imported at the top of the file but is
    # never passed via callbacks= here, so no learning-rate scheduling happens.
    model.fit(wavs, labels, batch_size=512, epochs=650, verbose=1, validation_data=(testwavs, testlabels))


    # model.save('D:/graduation design/DNN/model.h5') # save the trained model


    # Evaluate on the test set (verbose=1 prints a progress bar).
    score = model.evaluate(testwavs, testlabels, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1]) # accuracy


    # Print a per-class precision/recall/F1 classification report.
    prd_test_y = model.predict(testwavs)
    print('===============================================================')
    print(sm.classification_report(np.argmax(testlabels, axis=1), np.argmax(prd_test_y, axis=1)))
    print('===============================================================')

# Confusion-matrix heatmap for the test-set predictions.
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np

# BUGFIX: these statements originally ran at module import time, but
# `testlabels`, `model` and `testwavs` are only defined inside the
# `if __name__ == '__main__':` block above — importing this module therefore
# raised NameError. Guard the plotting the same way as the training code.
if __name__ == '__main__':
    # testlabels is one-hot ground truth; model.predict returns per-class
    # probabilities — argmax recovers the class index for both.
    true_labels = np.argmax(testlabels, axis=1)
    predicted_labels = np.argmax(model.predict(testwavs), axis=1)
    conf_matrix = confusion_matrix(true_labels, predicted_labels)

    # Render the confusion matrix as an annotated heatmap.
    plt.figure(figsize=(10,7))
    sns.heatmap(conf_matrix, annot=True, fmt='d')
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.show()