from torch.utils.data import Subset
from dataset import DeapDataset
from model import CNNForEEG
from torch.utils.data import random_split, DataLoader
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torch
from torch.optim.lr_scheduler import StepLR

# dataset = DeapDataset('C:/Users/john/Desktop/learn-eeg/plv_matrix')
# train_ratio = 0.8
# test_ratio = 1 - train_ratio
# total_size = len(dataset)

# train_size = int(train_ratio * total_size)
# test_size = total_size - train_size

# generator = torch.Generator().manual_seed(42)
# train_dataset, test_dataset = random_split(
#     dataset,
#     [train_size, test_size],
#     generator=generator
# )

# all_indices = np.arange(4600 * 32)
# train_idx, test_idx = train_test_split(
#     all_indices, test_size=0.2, random_state=42)

# train_dataset = Subset(full_dataset, train_idx)
# test_dataset = Subset(full_dataset, test_idx)


# Build train/test datasets from the same precomputed PLV-matrix directory.
# The (1, 26) / (26, 33) arguments presumably select disjoint subject index
# ranges for a subject-wise split — verify against DeapDataset's signature.
DATA_DIR = 'C:/Users/john/Desktop/learn-eeg/plv_matrix_global'

train_dataset = DeapDataset(DATA_DIR, 1, 26)
test_dataset = DeapDataset(DATA_DIR, 26, 33)

# Shuffle only the training stream; evaluation order does not matter.
BATCH_SIZE = 256
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)


# Prefer the GPU when one is visible; otherwise run on the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

model = CNNForEEG().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Multiply the learning rate by 0.8 every 10 scheduler steps.
scheduler = StepLR(optimizer, step_size=10, gamma=0.8)


# Training configuration and per-epoch metric history.
num_epochs = 50

best_acc = 0  # best test accuracy (percent) seen so far; drives checkpointing

train_loss_list = []  # mean training loss per epoch
test_loss_list = []   # mean test loss per epoch

train_acc_list = []   # training accuracy (percent) per epoch
test_acc_list = []    # test accuracy (percent) per epoch

for epoch in range(num_epochs):
    # ---- Training phase ----
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for inputs, labels in train_loader:
        inputs = inputs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs.float())
        loss = criterion(outputs, labels.long())
        loss.backward()
        optimizer.step()

        b = loss.item()
        running_loss += b
        print("running_loss", b)

        # Predicted class = argmax over the logits.
        _, predicted = outputs.max(1)
        total += labels.size(0)
        a = predicted.eq(labels).sum().item()
        correct += a
        print("correct", a)

    # BUG FIX: the scheduler was previously stepped after every *batch*,
    # but StepLR is configured with step_size=10 epochs — per-batch stepping
    # decayed the learning rate by 0.8 every 10 batches, collapsing it almost
    # immediately. Epoch-based schedulers must be stepped once per epoch.
    scheduler.step()

    train_loss = running_loss / len(train_loader)
    train_acc = 100. * correct / total

    train_loss_list.append(train_loss)
    train_acc_list.append(train_acc)

    # ---- Validation phase ----
    model.eval()
    test_running_loss = 0.0
    test_correct = 0
    test_total = 0

    # No gradients needed during evaluation.
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs.float())
            loss = criterion(outputs, labels.long())

            test_running_loss += loss.item()
            _, predicted = outputs.max(1)
            test_total += labels.size(0)
            test_correct += predicted.eq(labels).sum().item()

    test_loss = test_running_loss / len(test_loader)
    test_acc = 100. * test_correct / test_total

    test_acc_list.append(test_acc)
    test_loss_list.append(test_loss)

    # Save the best model (by test accuracy) seen so far.
    if test_acc > best_acc:
        best_acc = test_acc
        torch.save(model.state_dict(), 'best_model.pth')

    print(f'Epoch [{epoch+1}/{num_epochs}]')
    print(f'Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.2f}%')
    print(f'Test Loss: {test_loss:.4f} | Test Acc: {test_acc:.2f}%')
    print('-' * 50)

print(f'Best Test Accuracy: {best_acc:.2f}%')

# Persist the per-epoch metric curves (as .npy files) for later analysis.
for fname, history in (
    ('train_loss_list', train_loss_list),
    ('train_acc_list', train_acc_list),
    ('test_loss_list', test_loss_list),
    ('test_acc_list', test_acc_list),
):
    np.save(fname, np.array(history))
