import numpy as np
from scipy import io
from sklearn.model_selection import train_test_split
import torch.nn as nn
import torch.optim as optim
import torch
import time
import matplotlib.pyplot as plt
from sklearn import preprocessing
from collections import Counter
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.metrics import confusion_matrix
import seaborn as sns

plt.rcParams['font.sans-serif'] = ['KaiTi']  # CJK-capable font so Chinese figure labels render
plt.rcParams['axes.unicode_minus'] = False   # keep minus signs rendering correctly with CJK fonts

# Load raw vibration recordings from MATLAB .mat files:
# N = normal, IR = inner-race fault, B = ball fault, OR = outer-race fault.
# Suffixes 0/1/2 presumably denote fault severities — TODO confirm against the dataset docs.
mat_N = io.loadmat('../data/data_N.mat')
mat_IR = io.loadmat('../data/data_IR.mat')
mat_B = io.loadmat('../data/data_B.mat')
mat_OR = io.loadmat('../data/data_OR.mat')

# Flatten each recording into a single row vector of shape (1, n_points).
data_N = np.transpose(mat_N['N']).reshape(1, -1)
data_B0 = np.transpose(mat_B['B0']).reshape(1, -1)
data_B1 = np.transpose(mat_B['B1']).reshape(1, -1)
data_B2 = np.transpose(mat_B['B2']).reshape(1, -1)
data_IR0 = np.transpose(mat_IR['IR0']).reshape(1, -1)
data_IR1 = np.transpose(mat_IR['IR1']).reshape(1, -1)
data_IR2 = np.transpose(mat_IR['IR2']).reshape(1, -1)
data_OR0 = np.transpose(mat_OR['OR0']).reshape(1, -1)
data_OR1 = np.transpose(mat_OR['OR1']).reshape(1, -1)
data_OR2 = np.transpose(mat_OR['OR2']).reshape(1, -1)

# Sliding-window parameters: samples per window and hop between window starts.
win_len = 2000
step = 500

def data_sample(data, win_len, step):
    """Slice a 1-D signal into overlapping windows and robust-scale them.

    Parameters
    ----------
    data : 1-D array-like
        The raw signal.
    win_len : int
        Number of samples per window.
    step : int
        Hop size between consecutive window starts.

    Returns
    -------
    ndarray of shape (n_windows, win_len)
        Windows scaled column-wise by sklearn's RobustScaler
        (median / IQR based, resistant to outliers).

    Raises
    ------
    ValueError
        If the signal is shorter than one window (the original code would
        instead crash inside fit_transform on an empty list).
    """
    n = len(data)
    if n < win_len:
        raise ValueError(f"signal length {n} is shorter than win_len {win_len}")
    # Collect every complete window. Note: the original bound
    # `i < n - win_len` was off by one and dropped the final valid window
    # starting at index n - win_len; `n - win_len + 1` includes it.
    windows = [data[i:i + win_len] for i in range(0, n - win_len + 1, step)]
    # Renamed from the misleading `maxabsscaler_scaler` (it is a RobustScaler,
    # not a MaxAbsScaler); also no longer shadows the function's own name.
    scaler = preprocessing.RobustScaler()
    return scaler.fit_transform(windows)

# Window + robust-scale every recording; row 0 holds the flattened signal.
(data_N_sample, data_B0_sample, data_B1_sample, data_B2_sample,
 data_IR0_sample, data_IR1_sample, data_IR2_sample,
 data_OR0_sample, data_OR1_sample, data_OR2_sample) = [
    np.array(data_sample(rec[0], win_len, step))
    for rec in (data_N, data_B0, data_B1, data_B2,
                data_IR0, data_IR1, data_IR2,
                data_OR0, data_OR1, data_OR2)]

# Class names, index-aligned with the integer labels assigned below.
classes = ('N', 'B0', 'B1', 'B2', 'IR0', 'IR1', 'IR2', 'OR0', 'OR1', 'OR2')

# One constant-label list per class, one entry per extracted window.
N_label = [0] * data_N_sample.shape[0]
B0_label = [1] * data_B0_sample.shape[0]
B1_label = [2] * data_B1_sample.shape[0]
B2_label = [3] * data_B2_sample.shape[0]
IR0_label = [4] * data_IR0_sample.shape[0]
IR1_label = [5] * data_IR1_sample.shape[0]
IR2_label = [6] * data_IR2_sample.shape[0]
OR0_label = [7] * data_OR0_sample.shape[0]
OR1_label = [8] * data_OR1_sample.shape[0]
OR2_label = [9] * data_OR2_sample.shape[0]

# Stack features and concatenate labels in the same class order.
x_data = np.array(np.vstack((data_N_sample, data_B0_sample, data_B1_sample, data_B2_sample,
                             data_IR0_sample, data_IR1_sample, data_IR2_sample,
                             data_OR0_sample, data_OR1_sample, data_OR2_sample)))
y_data = np.array(N_label + B0_label + B1_label + B2_label
                  + IR0_label + IR1_label + IR2_label
                  + OR0_label + OR1_label + OR2_label)

# 将一维信号 reshape 成 2D
def reshape_to_2d(data, height=128):
    """Reshape flat windows into single-channel 2-D "images" for the CNN.

    Parameters
    ----------
    data : ndarray of shape (n_samples, n_features)
        One flattened window per row.
    height : int, optional
        Image height in rows. Defaults to 128, the value previously
        hard-coded, so existing callers are unaffected.

    Returns
    -------
    ndarray of shape (n_samples, 1, height, n_features // height)

    Notes
    -----
    Trailing features that do not fill a complete row are silently
    discarded — e.g. 2000 features with height=128 keep 128*15 = 1920
    values and drop the last 80.
    """
    n_samples, n_features = data.shape
    width = n_features // height
    # Truncate to a whole number of rows before reshaping.
    return data[:, :height * width].reshape(n_samples, 1, height, width)

# Split into train/test sets (70/30, fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, random_state=42, shuffle=True)

# Reshape each flat window into a (1, 128, width) image for the 2-D CNN.
x_train_2d = reshape_to_2d(x_train)
x_test_2d = reshape_to_2d(x_test)

# Convert to torch tensors: float32 inputs, int64 class labels
# (CrossEntropyLoss requires long targets).
x_train = torch.tensor(x_train_2d, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.long)
x_test = torch.tensor(x_test_2d, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.long)

# Sanity prints: class distribution of each split and final tensor shapes.
print("训练集数据分布：", Counter(y_train.numpy()))
print("测试集数据分布：", Counter(y_test.numpy()))
print("数据维度:", x_train.shape, y_train.shape, x_test.shape, y_test.shape)

# Wrap in DataLoaders; only the training set is shuffled each epoch.
batch_size = 64
train_dataset = TensorDataset(x_train, y_train)
test_dataset = TensorDataset(x_test, y_test)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

class Net2D(nn.Module):
    """CNN + stacked-LSTM classifier for windowed vibration "images".

    Three conv/ReLU/max-pool stages extract features from the
    (in_channels, H, W) input; the resulting feature map is read as a
    sequence of 128-dim vectors (one per spatial position), passed
    through two LSTM layers, and the final hidden state is classified
    by a two-layer fully-connected head with dropout.
    """

    def __init__(self, in_channels=1, num_classes=10):
        super(Net2D, self).__init__()
        # Conv stage 1: in_channels -> 32 channels, then halve H and W.
        self.conv1 = nn.Conv2d(in_channels, out_channels=32, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Conv stage 2: 32 -> 64 channels.
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Conv stage 3: 64 -> 128 channels.
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Sequence model over flattened spatial positions (128 features each).
        self.lstm1 = nn.LSTM(input_size=128, hidden_size=64, batch_first=True, bidirectional=False)
        self.lstm2 = nn.LSTM(input_size=64, hidden_size=64, batch_first=True, bidirectional=False)
        # Classification head.
        self.fc1 = nn.Linear(64, 128)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        # Three identical conv -> ReLU -> max-pool stages.
        for conv, pool in ((self.conv1, self.pool1),
                           (self.conv2, self.pool2),
                           (self.conv3, self.pool3)):
            x = pool(F.relu(conv(x)))
        # Treat every spatial position as one timestep of 128 channels:
        # (B, 128, h, w) -> (B, h, w, 128) -> (B, h*w, 128).
        x = x.permute(0, 2, 3, 1).reshape(x.size(0), -1, 128)
        seq, _ = self.lstm1(x)
        _, (hidden, _) = self.lstm2(seq)
        last = hidden[-1]  # final hidden state of the last LSTM layer
        # Fully-connected head with dropout between the two layers.
        out = F.relu(self.fc1(last))
        out = self.dropout(out)
        return self.fc2(out)

# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Net2D(num_classes=10).to(device)

# Cross-entropy loss over the 10 fault classes.
criterion = nn.CrossEntropyLoss()
# optimizer = optim.Adam(net.parameters(), lr=0.001)
# SGD with momentum was chosen over the (commented-out) Adam variant above.
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

def evaluate(model, loader):
    """Return the accuracy (%) of `model` over all batches in `loader`.

    Puts the model in eval mode and runs without gradients; uses the
    module-level `device` for batch placement.
    """
    model.eval()
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for batch_x, batch_y in loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            preds = model(batch_x).argmax(dim=1)
            n_correct += (preds == batch_y).sum().item()
            n_seen += batch_y.size(0)
    return 100.0 * n_correct / n_seen

# ---- Training loop ----
start = time.time()
num_epochs = 40
Accuracy_train = []  # train accuracy (%) per epoch
Accuracy_test = []   # test accuracy (%) per epoch
Loss_train = []      # mean per-sample training loss per epoch

for epoch in range(num_epochs):
    net.train()  # evaluate() below switches to eval mode, so re-enable train mode each epoch
    running_loss = 0.0
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Weight by batch size so the epoch average is per-sample,
        # correct even when the last batch is smaller.
        running_loss += loss.item() * inputs.size(0)
    epoch_loss = running_loss / len(train_loader.dataset)
    Loss_train.append(epoch_loss)
    train_acc = evaluate(net, train_loader)
    Accuracy_train.append(train_acc)
    test_acc = evaluate(net, test_loader)
    Accuracy_test.append(test_acc)

    print(f"Epoch [{epoch+1}/{num_epochs}]: Loss={epoch_loss:.4f}, Train Acc={train_acc:.2f}%, Test Acc={test_acc:.2f}%")

print('Total time = %2dm:%2ds' % ((time.time() - start) // 60, (time.time() - start) % 60))
print('Finished Training')

# Final performance on the held-out test set.
final_test_acc = evaluate(net, test_loader)
print('Final Test Accuracy: %.2f %%' % final_test_acc)
all_preds = []
all_labels = []
# Per-class accuracy: count correct and total predictions for each class.
net.eval()
correct_pred = {classname: 0 for classname in classes}
total_pred = {classname: 0 for classname in classes}
with torch.no_grad():
    for inputs, labels in test_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        _, predicted = torch.max(outputs, 1)
        # Collect flat prediction/label lists for the confusion matrix below.
        all_preds.extend(predicted.cpu().numpy())
        all_labels.extend(labels.cpu().numpy())
        for label, prediction in zip(labels, predicted):
            if label == prediction:
                correct_pred[classes[label]] += 1
            total_pred[classes[label]] += 1

# NOTE(review): divides by total_pred — assumes every class appears at least
# once in the test split (true here given the 70/30 stratification-free split
# sizes, but a class absent from the split would raise ZeroDivisionError).
for classname, correct_count in correct_pred.items():
    accuracy = 100 * float(correct_count) / total_pred[classname]
    print("Accuracy for class {:5s} is: {:.1f} %".format(classname, accuracy))
# Build the confusion matrix (rows = true class, columns = predicted class).
cm = confusion_matrix(all_labels, all_preds)
print("Confusion Matrix:")
print(cm)

# Plot train/test accuracy curves over epochs and save to disk.
epochs_Accuracy = list(range(num_epochs))
plt.figure(figsize=(10, 5))
plt.ylabel('准确率 (%)', fontsize=15)
plt.xlabel('迭代次数', fontsize=15)
plt.plot(epochs_Accuracy, Accuracy_train, '-', label='Train')
plt.plot(epochs_Accuracy, Accuracy_test, '--', label='Test')
plt.legend(loc='lower right')
plt.savefig('./train_test_acc.png', dpi=300, bbox_inches='tight')
plt.close()

# Plot the training-loss curve and save to disk.
plt.figure(figsize=(10, 5))
plt.ylabel('损失', fontsize=15)
plt.xlabel('迭代次数', fontsize=15)
plt.plot(epochs_Accuracy, Loss_train, '-', label='Train')
plt.legend(loc='upper right')
plt.savefig('./train_loss.png', dpi=300, bbox_inches='tight')
plt.close()

# Render the confusion matrix as a heatmap (saved to file; no plt.show()).
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=classes, yticklabels=classes)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')

# Save the figure to the current directory.
plt.savefig('./confusion_matrix.png', dpi=300, bbox_inches='tight')
plt.close()  # close the figure to release memory
