import os
import torch
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from modelSource import Multi_featureV2_modify as multi_person_feature
from loadDataset import BCICompDataSetForMultiV2
from loadDataset import LoadModelParam

# Run on the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def testModel(modelPath: str, data_loader):
    model = torch.load(modelPath).to(device)
    for data, label in data_loader:
        data = data.to(device)
        # data = torch.unsqueeze(data.float(), dim=1).to(device)
        label = label.long().to(device)
        output = model(data)
        print(output)
        print(label)
        print((output.argmax(1) == label).sum().item())


def test(rootPath: str, modelPath: str):
    """Load the two-subject BCI dataset, split off a validation set and
    evaluate the saved model on it.

    Args:
        rootPath: project data root; the dataset is read from
            ``<rootPath>/newData/SubjectA`` and ``<rootPath>/newData/SubjectB``.
        modelPath: path to a model checkpoint saved with ``torch.save``.
    """
    fileRootPath = os.path.join(rootPath, "newData")
    fileSubPath = ["SubjectA", "SubjectB"]
    datas = BCICompDataSetForMultiV2.BCICompDatasetForMulti(
        (61, 62, 63),
        os.path.join(fileRootPath, fileSubPath[0]),
        os.path.join(fileRootPath, fileSubPath[1]),
        False, True)

    batchSize = 64
    splitRate = 0.9  # 90% train / 10% validation

    dataSetSize = len(datas)
    train_size = int(dataSetSize * splitRate)
    val_size = dataSetSize - train_size  # remainder becomes the validation set

    # Randomly split; only the validation portion is evaluated here, so the
    # training subset is deliberately discarded.
    _train_dataset, val_dataset = random_split(datas, [train_size, val_size])

    # NOTE(review): drop_last=True discards the final partial batch, so up to
    # batchSize-1 validation samples are never evaluated — kept as-is to
    # preserve the original behavior.
    val_loader = DataLoader(val_dataset, batch_size=batchSize, shuffle=False, drop_last=True)

    testModel(modelPath, val_loader)
"""
tensor([[1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]], device='cuda:0', grad_fn=<SoftmaxBackward0>)
tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0,
        0, 0, 0, 0, 0, 0, 1, 0, 0, 0], device='cuda:0')
52
"""
