import numpy as np
from scipy import io
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras import layers, models
import time
import matplotlib.pyplot as plt
from sklearn import preprocessing
from collections import Counter
from sklearn.metrics import confusion_matrix
import seaborn as sns

# Use a Chinese-capable font (KaiTi) so the CJK axis labels below render,
# and keep the minus sign displayable when a non-ASCII font is active.
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False

# ---- Raw data loading ------------------------------------------------------
# Each .mat file carries one or more named signal arrays; every array is
# transposed and flattened into a single row vector of shape (1, n_points).
mat_N = io.loadmat('../data/data_N.mat')
mat_IR = io.loadmat('../data/data_IR.mat')
mat_B = io.loadmat('../data/data_B.mat')
mat_OR = io.loadmat('../data/data_OR.mat')

data_N = mat_N['N'].T.reshape(1, -1)
data_B0 = mat_B['B0'].T.reshape(1, -1)
data_B1 = mat_B['B1'].T.reshape(1, -1)
data_B2 = mat_B['B2'].T.reshape(1, -1)
data_IR0 = mat_IR['IR0'].T.reshape(1, -1)
data_IR1 = mat_IR['IR1'].T.reshape(1, -1)
data_IR2 = mat_IR['IR2'].T.reshape(1, -1)
data_OR0 = mat_OR['OR0'].T.reshape(1, -1)
data_OR1 = mat_OR['OR1'].T.reshape(1, -1)
data_OR2 = mat_OR['OR2'].T.reshape(1, -1)

# Sliding-window parameters: window length and stride, in signal points.
win_len = 2000
step = 500

def data_sample(data, win_len, step):
    """Slice a 1-D signal into overlapping windows and robust-scale them.

    :param data: 1-D array-like vibration signal
    :param win_len: number of points in each window
    :param step: stride between consecutive window starts
    :return: 2-D array of shape (n_windows, win_len); each column is scaled
             by ``RobustScaler`` (median/IQR, resistant to outliers)
    """
    # Sliding-window segmentation; any trailing remainder shorter than
    # win_len is dropped (same bound as the original `while` loop).
    windows = [data[start:start + win_len]
               for start in range(0, len(data) - win_len, step)]
    # This is a RobustScaler — the original variable name
    # ("maxabsscaler_scaler") was misleading.
    return preprocessing.RobustScaler().fit_transform(windows)

# Segment every raw signal into normalized windows (one row per window).
(data_N_sample, data_B0_sample, data_B1_sample, data_B2_sample,
 data_IR0_sample, data_IR1_sample, data_IR2_sample,
 data_OR0_sample, data_OR1_sample, data_OR2_sample) = (
    np.array(data_sample(signal[0], win_len, step))
    for signal in (data_N, data_B0, data_B1, data_B2,
                   data_IR0, data_IR1, data_IR2,
                   data_OR0, data_OR1, data_OR2))


classes = ('N', 'B0', 'B1', 'B2', 'IR0', 'IR1', 'IR2', 'OR0', 'OR1', 'OR2')

# One integer label per class, repeated once for every window of that class.
N_label = [0] * data_N_sample.shape[0]
B0_label = [1] * data_B0_sample.shape[0]
B1_label = [2] * data_B1_sample.shape[0]
B2_label = [3] * data_B2_sample.shape[0]
IR0_label = [4] * data_IR0_sample.shape[0]
IR1_label = [5] * data_IR1_sample.shape[0]
IR2_label = [6] * data_IR2_sample.shape[0]
OR0_label = [7] * data_OR0_sample.shape[0]
OR1_label = [8] * data_OR1_sample.shape[0]
OR2_label = [9] * data_OR2_sample.shape[0]

# Stack all windows row-wise and concatenate the matching label lists
# in the same class order.
x_data = np.vstack((data_N_sample, data_B0_sample, data_B1_sample, data_B2_sample,
                    data_IR0_sample, data_IR1_sample, data_IR2_sample,
                    data_OR0_sample, data_OR1_sample, data_OR2_sample))
y_data = (N_label + B0_label + B1_label + B2_label
          + IR0_label + IR1_label + IR2_label
          + OR0_label + OR1_label + OR2_label)

x_data = np.array(x_data)
y_data = np.array(y_data)


# Fold each 1-D signal window into a 2-D single-channel "image".
def reshape_to_2d(data, height=128):
    """Reshape flat samples into (n, height, width, 1) images.

    :param data: 2-D array of shape (n_samples, n_features)
    :param height: image height in rows; default 128 preserves the
        original hard-coded behavior
    :return: array of shape (n_samples, height, n_features // height, 1).
        Trailing features that do not fill a full column are discarded
        (e.g. 2000 features -> 128x15 image, dropping 80 points).
    :raises ValueError: if a sample has fewer than ``height`` features,
        which would otherwise fail inside ``reshape`` with a cryptic error
    """
    n_samples, n_features = data.shape
    width = n_features // height
    if width == 0:
        raise ValueError(
            f"need at least {height} features per sample, got {n_features}")
    return data[:, :height * width].reshape(-1, height, width, 1)

# 70/30 train-test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.3, random_state=42, shuffle=True)

# Convert both partitions from flat windows to 2-D image tensors.
x_train_2d, x_test_2d = reshape_to_2d(x_train), reshape_to_2d(x_test)


# Residual building block (two convs + shortcut).
def residual_block(x, filters, kernel_size=3):
    """
    Two-conv residual block with identity (or 1x1 projection) shortcut.

    :param x: input 4-D tensor
    :param filters: number of filters in both convs; if it differs from the
        input's channel count, the shortcut is projected with a 1x1 conv so
        the Add() shapes match — the original identity-only shortcut crashed
        in that case. With matching channels the behavior is unchanged.
    :param kernel_size: spatial kernel size of both convs
    :return: ReLU-activated output tensor, same spatial size as the input
    """
    shortcut = x
    if x.shape[-1] != filters:
        # Standard ResNet projection shortcut for channel mismatch.
        shortcut = layers.Conv2D(filters, 1, padding='same')(x)
    y = layers.Conv2D(filters, kernel_size, padding='same', activation='relu')(x)
    y = layers.Conv2D(filters, kernel_size, padding='same')(y)
    y = layers.Add()([shortcut, y])
    return layers.Activation('relu')(y)

# Build the CNN + ResNet classifier.
def create_resnet_cnn(input_shape, num_classes):
    """
    Build and compile a small CNN with two residual blocks.

    :param input_shape: input image shape (height, width, channels)
    :param num_classes: number of output classes
    :return: compiled keras.Model (adam + sparse categorical cross-entropy)
    """
    inputs = layers.Input(shape=input_shape)

    # Stem: wide 7x7 conv followed by 2x2 downsampling.
    net = layers.Conv2D(64, (7, 7), padding='same', activation='relu')(inputs)
    net = layers.MaxPooling2D((2, 2))(net)

    # Two stacked residual blocks at 64 channels.
    for _ in range(2):
        net = residual_block(net, 64)

    # One extra conv stage with downsampling.
    net = layers.Conv2D(128, (3, 3), padding='same', activation='relu')(net)
    net = layers.MaxPooling2D((2, 2))(net)

    # Classification head: GAP -> dense -> dropout -> softmax.
    net = layers.GlobalAveragePooling2D()(net)
    net = layers.Dense(128, activation='relu')(net)
    net = layers.Dropout(0.5)(net)
    outputs = layers.Dense(num_classes, activation='softmax')(net)

    model = models.Model(inputs, outputs)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

# ---- Model creation --------------------------------------------------------
# Derive the input shape from the prepared data instead of hard-coding
# (128, 15, 1); this stays correct if win_len or the image height changes.
input_shape = x_train_2d.shape[1:]
num_classes = 10
model = create_resnet_cnn(input_shape, num_classes)
model.summary()

# ---- Training --------------------------------------------------------------
start = time.time()
num_epochs = 40
batch_size = 64

# validation_split carves 30% off the TRAINING data for per-epoch validation.
history = model.fit(x_train_2d, y_train, epochs=num_epochs,
                    batch_size=batch_size, validation_split=0.3)

# Read the clock once so the minutes and seconds fields agree (the original
# called time.time() twice and could straddle a minute boundary).
elapsed = time.time() - start
print('Total time = %2dm:%2ds' % (elapsed // 60, elapsed % 60))
print('Finished Training')

# ---- Final evaluation on the held-out test set ------------------------------
test_loss, test_acc = model.evaluate(x_test_2d, y_test)
print('Final Test Accuracy: %.2f %%' % (test_acc * 100))

# ---- Per-class accuracy ----------------------------------------------------
y_pred = model.predict(x_test_2d)
y_pred_classes = np.argmax(y_pred, axis=1)

correct_pred = {classname: 0 for classname in classes}
total_pred = {classname: 0 for classname in classes}

# Tally correct predictions and totals keyed by the TRUE class name.
for label, prediction in zip(y_test, y_pred_classes):
    if label == prediction:
        correct_pred[classes[label]] += 1
    total_pred[classes[label]] += 1

for classname, correct_count in correct_pred.items():
    total = total_pred[classname]
    # Guard against a class absent from the test split; the original
    # divided unconditionally and could raise ZeroDivisionError.
    accuracy = 100 * float(correct_count) / total if total else 0.0
    print("Accuracy for class {:5s} is: {:.1f} %".format(classname, accuracy))

# ---- Confusion matrix (rows = true class, cols = predicted class) ----------
cm = confusion_matrix(y_test, y_pred_classes)
print("Confusion Matrix:")
print(cm)

# ---- Accuracy / loss curves ------------------------------------------------
# NOTE: history's val_* series come from the 30% validation_split of the
# TRAINING data, not the held-out test set, so they are labeled
# "Validation" here (the original "Test" label was misleading).
epochs_Accuracy = list(range(num_epochs))
plt.figure(figsize=(10, 5))
plt.ylabel('准确率 (%)', fontsize=15)
plt.xlabel('迭代次数', fontsize=15)
plt.plot(epochs_Accuracy, history.history['accuracy'], '-', label='Train')
plt.plot(epochs_Accuracy, history.history['val_accuracy'], '--', label='Validation')
plt.legend(loc='lower right')
plt.savefig('./train_test_acc.png', dpi=300, bbox_inches='tight')
plt.close()

plt.figure(figsize=(10, 5))
plt.ylabel('损失', fontsize=15)
plt.xlabel('迭代次数', fontsize=15)
plt.plot(epochs_Accuracy, history.history['loss'], '-', label='Train')
# Mirror the accuracy figure by also showing the validation loss.
plt.plot(epochs_Accuracy, history.history['val_loss'], '--', label='Validation')
plt.legend(loc='upper right')
plt.savefig('./train_loss.png', dpi=300, bbox_inches='tight')
plt.close()

# ---- Confusion-matrix heatmap ----------------------------------------------
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=classes, yticklabels=classes)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.savefig('./confusion_matrix.png', dpi=300, bbox_inches='tight')
plt.close()
