import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt

def convert_to_one_hot(Y, C):
    """Return the one-hot encoding of label array Y over C classes.

    Output shape is (C, n_labels): each column is the one-hot vector
    for the corresponding entry of Y (flattened in row-major order).
    """
    flat_labels = Y.reshape(-1)
    identity = np.eye(C, dtype=int)
    return identity[flat_labels].T
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the dataset (expects keys 'emg' and 'label'; emg is presumably
# samples x features — TODO confirm against the .mat file)
data = sio.loadmat('data/ugodataprocess.mat')
emgData = data['emg']
emgLabel = data['label']
# Shuffle samples and labels with the same random permutation
N = emgData.shape[0]
index = np.random.permutation(N)
data = emgData[index, :]
label = emgLabel[index]

# Add a trailing singleton dimension to the data; one-hot encode the
# labels over 13 classes (convert_to_one_hot returns (13, N), so .T
# restores the (N, 13) layout).
data = np.expand_dims(data, axis=2)
label = convert_to_one_hot(label,13).T

# Split into train/test sets (80/20, no stratification)
N = data.shape[0]
num_train = round(N * 0.8)
X_train = torch.tensor(data[0:num_train]).float()
Y_train = torch.tensor(label[0:num_train]).float()
X_test = torch.tensor(data[num_train:N]).float()
Y_test = torch.tensor(label[num_train:N]).float()

X_train = X_train.unsqueeze(1)  # add a channel dim at index 1 -> 4-D (N, 1, features, 1)
X_test = X_test.unsqueeze(1)
Y_train = Y_train.unsqueeze(1)  # labels become (N, 1, 13); undone by argmax(dim=2) further down
Y_test = Y_test.unsqueeze(1)

# Move all tensors to the selected device (GPU if available)
X_train = X_train.to(DEVICE)
Y_train = Y_train.to(DEVICE)
X_test = X_test.to(DEVICE)
Y_test = Y_test.to(DEVICE)

print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))


# Define the neural-network model
class CNN(nn.Module):
    """Four-conv-layer CNN that maps (N, 1, H, 1) inputs to 13-class logits.

    The fully-connected head hard-codes a flattened feature size of 512
    (= 256 channels * 2 * 1), which assumes the input spatial size makes
    pool1 + pool3 reduce the feature map to 2x1 — e.g. H = 8 with the
    (N, 1, features, 1) tensors built above. Verify against the dataset.

    Changes from the original: removed `pool2`, which was constructed but
    never used in `forward` (MaxPool2d holds no parameters, so state_dicts
    are unaffected), and dropped the commented-out relu/fc4 dead code.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=2, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d((2, 1), (2, 1))  # halve height only
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(2, 2)  # halve both spatial dims
        self.flatten = nn.Flatten()
        # One dropout module is reused at two points in forward; Dropout is
        # stateless, so sharing it is equivalent to two separate instances.
        self.dropout1 = nn.Dropout(0.25)
        self.fc1 = nn.Linear(512, 1280)
        self.dropout2 = nn.Dropout(0.25)
        self.fc2 = nn.Linear(1280, 512)
        self.fc3 = nn.Linear(512, 13)

    def forward(self, x):
        """Forward pass: x is (N, 1, H, 1); returns raw logits (N, 13)."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.pool1(x)
        x = self.dropout1(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = self.pool3(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        x = F.relu(self.fc2(x))
        # No softmax here: CrossEntropyLoss expects raw logits.
        return self.fc3(x)


# Instantiate the model on the selected device
model = CNN().to(DEVICE)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=0.001)
#scheduler = lr.ExponentialLR(optimizer, gamma=0.3)
# Convert the (N, 1, 13) one-hot labels back to long class indices of
# shape (N,), as required by CrossEntropyLoss
Y_train = torch.argmax(Y_train, dim=2).squeeze().long()
Y_test = torch.argmax(Y_test, dim=2).squeeze().long()


# Train the model. Training is full-batch (the DataLoader import is unused);
# evaluation on the test split runs every epoch.
start = time.time()
epochs = 2000
train_losses = []
test_accuracies = []
for epoch in range(epochs):
    # -- training step --
    model.train()  # enable dropout for the parameter update
    optimizer.zero_grad()
    outputs = model(X_train)
    loss = criterion(outputs, Y_train)
    loss.backward()
    optimizer.step()
    #scheduler.step()
    train_losses.append(loss.item())

    # -- evaluation (the original `(epoch + 1) % 1 == 0` gate was always true) --
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, epochs, loss.item()))
    # BUG FIX: model.eval() was commented out, leaving dropout active during
    # evaluation and corrupting the reported test accuracy.
    model.eval()
    with torch.no_grad():
        outputs = model(X_test)
        _, predicted = torch.max(outputs, 1)  # predicted class indices
        total = Y_test.size(0)
        correct = (predicted == Y_test).sum().item()
        print('Test Accuracy: {:.2f}%'.format(100 * correct / total))
        test_accuracies.append(100 * correct / total)

end = time.time()
print("Training time:", end - start)



# Plot the training loss (left) and test accuracy (right) side by side
plt.figure(figsize=(10, 5))
panels = [
    (1, train_losses, 'Train Loss', 'Loss', 'Training Loss'),
    (2, test_accuracies, 'Test Accuracy', 'Accuracy', 'Test Accuracy'),
]
for position, series, curve_label, y_label, title in panels:
    plt.subplot(1, 2, position)
    plt.plot(series, label=curve_label)
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend()

plt.show()