import time
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.svm import NuSVC
from sklearn.preprocessing import StandardScaler
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
from sklearn.metrics import accuracy_score, classification_report

# Release any cached GPU memory left over from a previous run (no-op without CUDA).
torch.cuda.empty_cache()


def convert_to_one_hot(Y, C):
    """Convert integer class labels to a one-hot matrix.

    Parameters
    ----------
    Y : array-like of shape (N,) or (N, 1)
        Integer class labels in the range [0, C).
    C : int
        Number of classes (number of columns in the result).

    Returns
    -------
    np.ndarray of shape (N, C), dtype int
        Row i has a 1 in column Y[i] and 0 elsewhere.
    """
    # Flatten (N, 1) label arrays to (N,) and force integer dtype.
    Y = np.asarray(Y).astype(int).reshape(-1)
    one_hot = np.zeros((Y.shape[0], C), dtype=int)
    # Vectorized fancy-indexed assignment replaces the per-row Python loop.
    one_hot[np.arange(Y.shape[0]), Y] = 1
    return one_hot


def convert_from_one_hot(one_hot):
    """Recover integer class labels from a one-hot matrix.

    Returns, for each row, the column index holding the largest value
    (the position of the 1 for a true one-hot row).
    """
    return np.argmax(one_hot, axis=1)


def svm_fc(x, y):
    """Fit a linear NuSVC on the flattened, standardized features ``x`` and
    return its predictions on that same data as a float tensor on DEVICE.

    Acts as an SVM classification head on top of CNN features during
    training/evaluation.

    Parameters: x — feature tensor of shape (batch, ...); y — one-hot label tensor.
    """
    # Flatten each sample to a 1-D feature vector and standardize per feature.
    features = x.cpu().detach().numpy().reshape(x.size(0), -1)
    features = StandardScaler().fit_transform(features)
    targets = convert_from_one_hot(y.cpu())

    classifier = NuSVC(nu=0.06, kernel='linear', decision_function_shape='ovr',
                       class_weight={0: 0.25, 1: 1, 2: 1,
                                     3: 1, 4: 1})
    classifier.fit(features, targets)

    predictions = classifier.predict(features)
    return torch.from_numpy(predictions).float().to(DEVICE)


DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the preprocessed EMG recordings from the MATLAB file
# (expects variables 'emg' and 'label').
data = sio.loadmat('ugodata-process-to-use.mat')
emgData = data['emg']
emgLabel = np.transpose(data['label'])  # transpose so samples run along axis 0
print(f'emglabel', emgLabel.shape)
data = emgData  # from here on, 'data' refers to the sample array
label = emgLabel
label = label.astype(int)  # labels must be integer class indices
N = emgData.shape[0]  # total number of samples


class EMGDataset(Dataset):
    """Minimal map-style Dataset pairing EMG samples with their labels."""

    def __init__(self, data, labels):
        """Keep references to the sample and label containers (no copies)."""
        self.data, self.labels = data, labels

    def __len__(self):
        """Number of (sample, label) pairs in the dataset."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Return the (sample, label) pair at position ``idx``."""
        return self.data[idx], self.labels[idx]


# Samples and labels must pair up one-to-one along the first axis.
assert data.shape[0] == label.shape[0], "两个数组的第一个维度长度必须相同"

# Generate one random permutation of sample indices.
indices = np.random.permutation(data.shape[0])

# Shuffle samples and labels with the SAME permutation so pairs stay aligned.
data = data[indices]
label = label[indices]

label = convert_to_one_hot(label, 5)  # 5 gesture classes
N = data.shape[0]
num_train = round(N * 0.8)  # 80/20 train/test split
X_train = torch.tensor(data[0:num_train]).float()
Y_train = torch.tensor(label[0:num_train]).float()
X_test = torch.tensor(data[num_train:N]).float()
Y_test = torch.tensor(label[num_train:N]).float()
# NOTE(review): Z_test slices the UNSHUFFLED emgLabel, so it is NOT aligned
# with X_test/Y_test above. It is unused below, but confirm before relying on it.
Z_test = torch.tensor(emgLabel[num_train:N]).float()
# Move channels to dim 1 for Conv1d — assumes input layout (N, length, 4); TODO confirm.
X_train = X_train.permute(0, 2, 1)
X_test = X_test.permute(0, 2, 1)

print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))

train_dataset = EMGDataset(X_train, Y_train)
test_dataset = EMGDataset(X_test, Y_test)

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=16, shuffle=False)


# Neural network model definition.
class CNN(nn.Module):
    """Three-branch 1-D CNN over 4-channel EMG input.

    forward() returns (logits, features): 5-way class logits from fc4 and the
    512-dim fc3 activations used as features for the downstream SVM.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # First convolution branch.
        self.conv11 = nn.Conv1d(4, 16, kernel_size=2, padding=1)
        self.conv12 = nn.Conv1d(16, 32, 3, padding=1)  # NOTE(review): defined but unused in forward()
        self.conv13 = nn.Conv1d(16, 64, 3, padding=1)
        self.pool11 = nn.MaxPool1d(2, 2)
        self.pool12 = nn.MaxPool1d(2, 2)

        # Second convolution branch.
        self.conv21 = nn.Conv1d(4, 32, 2, padding=2)
        self.conv22 = nn.Conv1d(32, 64, 2, padding=1)  # NOTE(review): defined but unused in forward()
        self.conv23 = nn.Conv1d(32, 128, 3, padding=1)
        self.pool21 = nn.MaxPool1d(2, 2)
        self.pool22 = nn.MaxPool1d(3, 3)

        # Third convolution branch (average pooling instead of max).
        self.conv31 = nn.Conv1d(4, 16, 2, padding=1)
        self.conv32 = nn.Conv1d(16, 32, 3, padding=1)  # NOTE(review): defined but unused in forward()
        self.conv33 = nn.Conv1d(16, 64, 3, padding=1)
        self.pool31 = nn.AvgPool1d(2, 2)
        self.pool32 = nn.AvgPool1d(3, 3)

        self.flatten = nn.Flatten()
        self.dropout1 = nn.Dropout(0.4)
        # 47968 is tied to the fixed input sequence length; a different
        # length breaks this Linear layer.
        self.fc1 = nn.Linear(47968, 16384)
        self.dropout2 = nn.Dropout(0.4)
        self.fc2 = nn.Linear(16384, 4096)
        self.fc3 = nn.Linear(4096, 512)
        self.fc4 = nn.Linear(512, 5)

    def forward(self, x1, x2, x3, x4):
        # NOTE(review): x4 is accepted but never used (callers pass the labels here).
        # x = x.view(-1, 256 * 2 * 1)
        x1 = F.relu(self.conv11(x1))
        # x1 = F.relu(self.conv12(x1))
        x1 = self.pool11(x1)
        x1 = F.relu(self.conv13(x1))
        x1 = self.pool12(x1)

        x2 = F.relu(self.conv21(x2))
        # x2 = F.relu(self.conv22(x2))
        x2 = self.pool21(x2)
        x2 = F.relu(self.conv23(x2))
        x2 = self.pool22(x2)

        x3 = F.relu(self.conv31(x3))
        # x3 = F.relu(self.conv32(x3))
        x3 = self.pool31(x3)
        x3 = F.relu(self.conv33(x3))
        x3 = self.pool32(x3)

        # Concatenate the three flattened branch outputs along the feature dim.
        x0 = torch.cat((x1.view(x1.size(0), -1), x2.view(x2.size(0), -1)), dim=1)
        x = torch.cat((x0.view(x0.size(0), -1), x3.view(x3.size(0), -1)), dim=1)
        # NOTE(review): pool11 is reused here on the flattened 2-D (N, F) tensor,
        # max-pooling over the feature dimension and halving F — confirm intended.
        x = self.pool11(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        x5 = x  # keep fc3 activations as the SVM feature vector
        x = self.fc4(x)
        return x, x5


# Create the model instance.
model = (CNN().to(DEVICE))

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train the model.
start = time.time()
epochs = 100
train_losses = []
test_accuracies = []  # NOTE(review): never appended to anywhere, so its plot stays empty
test_accuracies_svm = []
for epoch in range(epochs):
    model.train()
    for batch_idx, (data_batch, label_batch) in enumerate(train_loader):
        data_batch, label_batch = data_batch.to(DEVICE), label_batch.to(DEVICE)
        optimizer.zero_grad()
        # The same batch feeds all three CNN branches; the labels are passed as
        # the (unused) fourth argument of forward().
        outputs, _ = model(data_batch, data_batch, data_batch, label_batch.float())
        # CNN
        print(outputs.shape)
        print(label_batch.shape)
        # CrossEntropyLoss with one-hot float targets — presumably relies on
        # probability-target support (PyTorch >= 1.10); confirm torch version.
        loss = criterion(outputs, label_batch)
        loss.backward()
        optimizer.step()
        train_losses.append(loss.item())  # recorded per batch, not per epoch
        if (batch_idx + 1) % 1 == 0:  # i.e. after every single batch
            print(
                '------Epoch [{}/{}], Batch [{}/{}], Loss: {:.4f}------'.format(epoch + 1, epochs, batch_idx + 1,
                                                                                len(train_loader),
                                                                                loss.item()))
            # Evaluate on the test set.
            model.eval()
            # NOTE(review): train mode is only restored at the top of the next
            # epoch, so the remaining batches of this epoch train with dropout off.
            with torch.no_grad():
                for data_batch, label_batch in test_loader:
                    data_batch, label_batch = data_batch.to(DEVICE), label_batch.to(DEVICE)
                    print(label_batch)
                    _, svm_feature = model(data_batch, data_batch, data_batch, label_batch.float())
                    # Classify the CNN features with an SVM.
                    label_batch_cpu = label_batch.cpu()
                    # NOTE(review): svm_fc fits the SVM on this *test* batch and
                    # predicts on the same data, so the reported accuracy is optimistic.
                    svm_predicted = svm_fc(svm_feature, label_batch)
                    # Convert labels back from one-hot for the sklearn metrics below.
                    svm_predicted_cpu = svm_predicted.cpu()
                    label_svm = convert_from_one_hot(label_batch_cpu)
                    print(label_svm)
                    accuracy_svm = accuracy_score(label_svm, svm_predicted_cpu.numpy())
                    test_accuracies_svm.append(accuracy_svm)

                    print("Accuracy-SVM:", accuracy_svm)
                    print(classification_report(label_svm, svm_predicted_cpu))
                    data_batch_cpu = []
                    data_batch_numpy = []
            # model.train()

end = time.time()
print("Training time:", end - start)
torch.jit.save(torch.jit.script(model), 'model.pt')
# Save the loss/accuracy curves for later analysis in MATLAB.
integrated_data = {'loss': train_losses, 'acc': test_accuracies_svm}
sio.savemat('pic.mat', integrated_data)
# Plot the loss and accuracy curves.
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
plt.plot(train_losses, label='Train Loss')
plt.xlabel('Epoch')  # NOTE(review): losses are recorded per batch, so the x-axis is batches
plt.ylabel('Loss')
plt.title('Training Loss')
plt.legend()

# NOTE(review): test_accuracies is never populated, so this subplot is empty.
plt.subplot(2, 2, 2)
plt.plot(test_accuracies, label='Test Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Test Accuracy')
plt.legend()

plt.subplot(2, 2, 3)
plt.plot(test_accuracies_svm, label='SVM Accuracy')
plt.xlabel('Epoch')  # x-axis is per evaluated test batch, not per epoch
plt.ylabel('Accuracy')
plt.title('SVM Test Accuracy')
plt.legend()

plt.show()
