import torch
import scipy.io as sio
from dataprocess import channel_reconstruct, data_process_to_50hz, data_process_with_sliding_window, processed_data_join
from net_and_train import convert_to_one_hot
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import time
import matplotlib.pyplot as plt

# Load the raw EMG recordings and labels from the MATLAB export.
data = sio.loadmat('ugodata.mat')
emg1 = data['emg']
print('emgshape', emg1.shape)  # was a pointless f-string with no placeholders
label1 = data['label']
print('labelshape', label1.shape)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Rebuild the channel layout; 114514 is a placeholder argument — TODO: fill in
# the real value (original comment said "fill in later").
data_preprocess = channel_reconstruct(data, 114514)
emg = data_preprocess['emg']
label = data_preprocess['label']

# NOTE(review): the return value is discarded — presumably this downsamples
# `emg` in place to 50 Hz; confirm, otherwise the result should be captured.
data_process_to_50hz(emg, 101)

# Slide a window of 40 samples with stride 30 and build wavelet spectrograms.
spectrograms, label = data_process_with_sliding_window(emg, label, 40, 30, 'wavelet')

spec = np.array(spectrograms)

data_join = processed_data_join(emg, label)

emg_data = data_join['emg']
label_data = data_join['label']

label = label_data.astype(int)  # integer class ids for one-hot conversion below
N = emg_data.shape[0]           # total number of windows/samples


class EMGDataset(Dataset):
    """Minimal map-style dataset pairing EMG samples with their labels."""

    def __init__(self, data, labels):
        """Keep references to the sample container and the label container."""
        self.data = data
        self.labels = labels

    def __len__(self):
        """Number of (sample, label) pairs available."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Return the (sample, label) pair at position ``idx``."""
        sample = self.data[idx]
        target = self.labels[idx]
        return sample, target


class CNN(nn.Module):
    """Three-branch 1-D CNN classifying 4-channel EMG windows into 7 classes.

    The same ``(batch, 4, length)`` input is run through three parallel
    convolutional branches; their flattened outputs are concatenated and fed
    to a fully-connected head producing 7 logits.

    Fixes vs. the original:
      * ``forward`` takes a single input — the training loop calls
        ``model(data_batch)``; the old 5-argument signature could never be
        called that way (and ignored ``x``/``x0`` anyway).
      * ``torch.cat`` now receives a list of tensors and an explicit ``dim``
        (passing two tensors positionally raises a TypeError).
      * ``fc1`` is a ``nn.LazyLinear`` so the head adapts to whatever window
        length is used, instead of the hard-coded 13312-feature input (which
        does not match the 40-sample windows built in this script). Note the
        in-features are fixed on the *first* forward pass of each instance.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Branch 1: 4 -> 16 -> 32 -> 64 channels with two /2 max-pools.
        self.conv11 = nn.Conv1d(4, 16, kernel_size=2, padding=1)
        self.conv12 = nn.Conv1d(16, 32, 3, padding=1)
        self.conv13 = nn.Conv1d(32, 64, 3, padding=1)
        self.pool11 = nn.MaxPool1d(2, 2)
        self.pool12 = nn.MaxPool1d(2, 2)

        # Branch 2: 4 -> 32 -> 64 -> 128 channels with /2 then /3 max-pools.
        self.conv21 = nn.Conv1d(4, 32, 2, padding=2)
        self.conv22 = nn.Conv1d(32, 64, 2, padding=1)
        self.conv23 = nn.Conv1d(64, 128, 3, padding=1)
        self.pool21 = nn.MaxPool1d(2, 2)
        self.pool22 = nn.MaxPool1d(3, 3)

        # Branch 3: same widths as branch 1 but with average pooling (/2, /3).
        self.conv31 = nn.Conv1d(4, 16, 2, padding=1)
        self.conv32 = nn.Conv1d(16, 32, 3, padding=1)
        self.conv33 = nn.Conv1d(32, 64, 3, padding=1)
        self.pool31 = nn.AvgPool1d(2, 2)
        self.pool32 = nn.AvgPool1d(3, 3)

        self.flatten = nn.Flatten()
        self.dropout1 = nn.Dropout(0.4)
        # LazyLinear infers its in-features on the first forward pass, so the
        # classifier head works for any window length (was Linear(13312, 4096)).
        self.fc1 = nn.LazyLinear(4096)
        self.dropout2 = nn.Dropout(0.4)
        self.fc2 = nn.Linear(4096, 1024)
        self.fc3 = nn.Linear(1024, 256)
        self.fc4 = nn.Linear(256, 7)

    def forward(self, x):
        """Map a ``(batch, 4, length)`` EMG window to ``(batch, 7)`` logits."""
        x1 = F.relu(self.conv11(x))
        x1 = F.relu(self.conv12(x1))
        x1 = self.pool11(x1)
        x1 = F.relu(self.conv13(x1))
        x1 = self.pool12(x1)

        x2 = F.relu(self.conv21(x))
        x2 = F.relu(self.conv22(x2))
        x2 = self.pool21(x2)
        x2 = F.relu(self.conv23(x2))
        x2 = self.pool22(x2)

        x3 = F.relu(self.conv31(x))
        x3 = F.relu(self.conv32(x3))
        x3 = self.pool31(x3)
        x3 = F.relu(self.conv33(x3))
        x3 = self.pool32(x3)

        # Flatten each branch and concatenate along the feature dimension.
        feats = torch.cat(
            [self.flatten(x1), self.flatten(x2), self.flatten(x3)], dim=1)
        out = self.dropout1(feats)
        out = F.relu(self.fc1(out))
        out = self.dropout2(out)
        out = F.relu(self.fc2(out))
        out = self.fc3(out)  # no activation between fc3 and fc4, as original
        out = self.fc4(out)

        return out


# Convert integer class ids to one-hot rows; after .T shape is (N, 7).
label = convert_to_one_hot(label, 7).T
num_train = round(N * 0.8)  # 80/20 train/test split
# BUG FIX: the original sliced `data` — the dict returned by sio.loadmat,
# which cannot be sliced. The model input is the joined EMG array `emg_data`.
X_train = torch.tensor(emg_data[0:num_train]).float()
Y_train = torch.tensor(label[0:num_train]).float()
X_test = torch.tensor(emg_data[num_train:N]).float()
Y_test = torch.tensor(label[num_train:N]).float()
# X_train = X_train.permute(0, 2, 1)
# X_test = X_test.permute(0, 2, 1)

print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))

train_dataset = EMGDataset(X_train, Y_train)
test_dataset = EMGDataset(X_test, Y_test)

train_loader = DataLoader(train_dataset, batch_size=512, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=512, shuffle=True)

model = CNN().to(device)
# Loss and optimizer. NOTE(review): targets are one-hot floats — this relies
# on CrossEntropyLoss accepting probability targets (torch >= 1.10); confirm.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# 训练模型
# --- Training loop: train one batch, then evaluate on the whole test set. ---
start = time.time()
epochs = 100
train_losses = []      # one entry per training batch
test_accuracies = []   # one entry per test batch per evaluation
for epoch in range(epochs):
    model.train()
    for batch_idx, (data_batch, label_batch) in enumerate(train_loader):
        data_batch, label_batch = data_batch.to(device), label_batch.to(device)
        optimizer.zero_grad()
        outputs = model(data_batch)
        loss = criterion(outputs, label_batch)
        loss.backward()
        optimizer.step()
        train_losses.append(loss.item())
        # (the original `% 1 == 0` guard was always true — removed)
        print(
            'Epoch [{}/{}], Batch [{}/{}], Loss: {:.4f}'.format(epoch + 1, epochs, batch_idx + 1, len(train_loader),
                                                                loss.item()))
        # Evaluate on the test set after every training batch.
        model.eval()
        with torch.no_grad():
            for test_data, test_labels in test_loader:
                test_data, test_labels = test_data.to(device), test_labels.to(device)
                test_outputs = model(test_data)
                predicted = torch.argmax(test_outputs, dim=1)
                # Labels are one-hot rows; argmax recovers the class index.
                targets = torch.argmax(test_labels, dim=1)
                correct = (predicted == targets).sum().item()
                batch_total = test_labels.size(0)
                acc = 100 * correct / batch_total
                print('Test Accuracy for batch {}: {:.2f}%'.format(batch_idx + 1, acc))
                test_accuracies.append(acc)
        # BUG FIX: restore training mode so dropout stays active for the
        # remaining batches of this epoch (original left the model in eval()).
        model.train()
end = time.time()
print("Training time:", end - start)

# 绘制 loss 和准确率图表
# Plot the per-batch training loss and per-evaluation test accuracy.
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(train_losses, label='Train Loss')
# BUG FIX: one loss is recorded per training batch, not per epoch.
plt.xlabel('Training batch')
plt.ylabel('Loss')
plt.title('Training Loss')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(test_accuracies, label='Test Accuracy')
# BUG FIX: one accuracy is recorded per test batch, not per epoch.
plt.xlabel('Evaluation step')
plt.ylabel('Accuracy')
plt.title('Test Accuracy')
plt.legend()

plt.show()
