# import numpy as np
# import scipy.io.wavfile as wf
# import python_speech_features as sf
# import sklearn.preprocessing as sp
# import os
# # from keras.models import Sequential
# from keras.models import Model
# from tensorflow.keras.layers import Input, Conv2D, Add, Activation, Flatten, Dense, BatchNormalization, MaxPooling2D
# from tensorflow.keras.optimizers import Adam
#
# # 整理样本
# def search_files(directory):
#     files_dict = {}
#     for cur_dir, sub_dirs, files in os.walk(directory):
#         for file in files:
#             if file.endswith(".wav"):
#                 label = cur_dir.split(os.path.sep)[-1]
#                 if label not in files_dict:
#                     files_dict[label] = []
#                 files_dict[label].append(os.path.join(cur_dir, file))
#     return files_dict
#
# def files_mfc(file_urls):
#     x_data, y_data = [], []
#     for label, urls in file_urls.items():
#         for file in urls:
#             sample_rate, signs = wf.read(file)
#             mfc = sf.mfcc(signs, sample_rate)
#             x_data.append(np.mean(mfc, axis=0))
#             y_data.append(label)
#     return np.array(x_data), y_data
#
# # 读取文件路径
# train_urls = search_files("D:/Pycharm/trainvoice5/train")
# test_urls = search_files("D:/Pycharm/trainvoice5/test")
#
# # 整理数据
# train_x, train_y = files_mfc(train_urls)
# test_x, test_y = files_mfc(test_urls)
#
# # 预处理数据
# input_shape = (13, 1, 1) # MFCC特征数量为13
# train_x = train_x.reshape(train_x.shape[0], *input_shape)
# test_x = test_x.reshape(test_x.shape[0], *input_shape)
#
# # 编码标签
# encoder = sp.LabelEncoder()
# train_y = encoder.fit_transform(train_y)
# test_y = encoder.transform(test_y)
#
# # 构建简化的ResNet模型
# def residual_block(x, filters, kernel_size=(1, 1), strides=(1, 1)):
#     # 主路径
#     main_path = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same')(x)
#     main_path = BatchNormalization()(main_path)
#     main_path = Activation('relu')(main_path)
#
#     main_path = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(main_path)
#     main_path = BatchNormalization()(main_path)
#
#     # 残差连接
#     shortcut = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same')(x)
#     shortcut = BatchNormalization()(shortcut)
#
#     # 将主路径和捷径相加
#     main_path = Add()([main_path, shortcut])
#     main_path = Activation('relu')(main_path)
#     return main_path
#
#
# # 输入层
# input_shape = (13, 1, 1)  # MFCC特征数量为13
# inputs = Input(shape=input_shape)
#
# # 第一个残差块
# x = residual_block(inputs, 32)
#
# # 池化层
# x = MaxPooling2D(pool_size=(2, 1))(x)
#
# # 展平层
# x = Flatten()(x)
#
# # 全连接层
# x = Dense(128, activation='relu')(x)
#
# # 输出层
# outputs = Dense(7, activation='softmax')(x)  # 分类数量为7
#
# # 创建模型
# model = Model(inputs=inputs, outputs=outputs)
#
#
#
# # 编译模型
# model.compile(optimizer=Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#
# # 训练模型
# model.fit(train_x, train_y, epochs=15, batch_size=16)
#
# #保存模型
# # file = open("D:/graduation design/RESNET/model.pb", "wb")
#
#
# # 预测测试集
# prd_test_y = model.predict(test_x)
# prd_test_y = np.argmax(prd_test_y, axis=1)
#
# # 输出分类报告
# from sklearn.metrics import classification_report
# print(classification_report(test_y, prd_test_y))
#
#
#
# #可视化实验结果
# import matplotlib.pyplot as plt
# import seaborn as sns
# from sklearn.metrics import confusion_matrix
# # 计算混淆矩阵
# cm = confusion_matrix(test_y, prd_test_y)
#
# # 绘制混淆矩阵热力图
# plt.figure(figsize=(10, 7))
# sns.heatmap(cm, annot=True, fmt='d')
# plt.title('Confusion Matrix')
# plt.ylabel('Actual Label')
# plt.xlabel('Predicted Label')
# plt.show()


import numpy as np
import scipy.io.wavfile as wf
import python_speech_features as sf
import sklearn.preprocessing as sp
import os
from keras.models import Model
from keras.layers import Input, Conv2D, Add, Activation, Flatten, Dense, BatchNormalization, MaxPooling2D
from keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import pickle


# Organize the samples
def search_files(directory):
    """Recursively gather .wav files under *directory*, keyed by label.

    The label of each file is the name of the directory that directly
    contains it (the last path component of the walked directory).

    Returns a dict: label -> list of paths to .wav files.
    """
    grouped = {}
    for dirpath, _subdirs, filenames in os.walk(directory):
        label = dirpath.split(os.path.sep)[-1]
        wav_paths = [
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(".wav")
        ]
        # Only create an entry for directories that actually contain audio.
        if wav_paths:
            grouped.setdefault(label, []).extend(wav_paths)
    return grouped

def files_mfc(file_urls):
    """Compute one averaged MFCC feature vector per audio file.

    *file_urls* is a dict mapping label -> list of .wav paths (as produced
    by ``search_files``).

    Returns ``(features, labels)`` where ``features`` is a 2-D ndarray with
    one row of mean MFCC coefficients per file and ``labels`` is the
    parallel list of label strings.
    """
    features, labels = [], []
    for label, paths in file_urls.items():
        for path in paths:
            rate, samples = wf.read(path)
            # Collapse the per-frame MFCC matrix into a single fixed-size
            # vector by averaging over time.
            coeffs = sf.mfcc(samples, rate)
            features.append(np.mean(coeffs, axis=0))
            labels.append(label)
    return np.array(features), labels

# Collect file paths for the training and test sets.
train_urls = search_files("D:/Pycharm/trainvoice5/train")
# BUG FIX: this previously pointed at the *train* directory, so the model
# was being evaluated on its own training data (the commented-out original
# above used the test directory).
test_urls = search_files("D:/Pycharm/trainvoice5/test")

# Extract mean-MFCC features and string labels for each split.
train_x, train_y = files_mfc(train_urls)
test_x, test_y = files_mfc(test_urls)

# Reshape each 13-coefficient MFCC vector into a (13, 1, 1) "image" so it
# can be fed through Conv2D layers.
input_shape = (13, 1, 1)  # 13 MFCC features
train_x = train_x.reshape(train_x.shape[0], *input_shape)
test_x = test_x.reshape(test_x.shape[0], *input_shape)

# Encode string labels as integers. Fit on the training labels only, then
# apply the same mapping to the test labels (raises on unseen labels).
encoder = sp.LabelEncoder()
train_y = encoder.fit_transform(train_y)
test_y = encoder.transform(test_y)

# Build a simplified ResNet model
def residual_block(x, filters, kernel_size=(1, 1), strides=(1, 1)):
    """One simplified ResNet block.

    Main branch: Conv2D -> BN -> ReLU -> Conv2D -> BN. The shortcut is
    always a projection (Conv2D + BN) of the input, so both branches have
    matching shapes regardless of *filters*/*strides*. The two branches are
    summed and passed through a final ReLU.
    """
    # Main branch — note only the first conv applies *strides*; the second
    # keeps the default (1, 1) stride.
    branch = Conv2D(filters=filters, kernel_size=kernel_size,
                    strides=strides, padding='same')(x)
    branch = Activation('relu')(BatchNormalization()(branch))
    branch = BatchNormalization()(
        Conv2D(filters=filters, kernel_size=kernel_size,
               padding='same')(branch))

    # Projected shortcut from the block input.
    skip = BatchNormalization()(
        Conv2D(filters=filters, kernel_size=kernel_size,
               strides=strides, padding='same')(x))

    # Merge the branches and apply the final non-linearity.
    return Activation('relu')(Add()([branch, skip]))


# --- Model assembly ------------------------------------------------------
# Input: one MFCC feature vector shaped (13, 1, 1).
inputs = Input(shape=input_shape)

# A single residual block with 32 filters.
x = residual_block(inputs, 32)

# Downsample along the feature axis.
x = MaxPooling2D(pool_size=(2, 1))(x)

# Flatten to a vector and classify.
x = Flatten()(x)
x = Dense(128, activation='relu')(x)

# Derive the class count from the fitted label encoder instead of
# hard-coding 7, so adding or removing classes in the dataset just works.
num_classes = len(encoder.classes_)
outputs = Dense(num_classes, activation='softmax')(x)

# Build the model from the functional graph.
model = Model(inputs=inputs, outputs=outputs)

# sparse_categorical_crossentropy matches the integer labels produced by
# LabelEncoder (no one-hot encoding needed).
model.compile(optimizer=Adam(), loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train the model.
model.fit(train_x, train_y, epochs=20, batch_size=16)

# Save the model. The previous ".pickle" extension was misleading:
# model.save() writes a Keras model file (HDF5 here), not a pickle, and
# newer Keras versions reject unrecognized extensions outright.
model.save("D:/graduation design/RESNET/model.h5")

# --- Evaluation ----------------------------------------------------------
# Predict class probabilities on the test set, then take the argmax of each
# row to recover integer class labels.
pred_probs = model.predict(test_x)
pred_labels = np.argmax(pred_probs, axis=1)

# Per-class precision / recall / F1 summary.
print(classification_report(test_y, pred_labels))

# Confusion matrix: rows are true labels, columns are predictions.
cm = confusion_matrix(test_y, pred_labels)

# Render the confusion matrix as an annotated heatmap.
plt.figure(figsize=(10, 7))
sns.heatmap(cm, annot=True, fmt='d')
plt.title('Confusion Matrix')
plt.ylabel('Actual Label')
plt.xlabel('Predicted Label')
plt.show()
