import os
import torch
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from modelSource import Multi_featureV2_2_modify,SepConv1D
from loadDataset import P3002022DatasetForMulti,P3002022DatasetForSepConv1D
from loadDataset import LoadModelParam
from config import MyConfig

# Module-wide default compute device: use CUDA when available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def testModel(modelPath: str, data_loader) -> float:
    """Evaluate a saved classifier on ``data_loader`` and print/return accuracy.

    Loads a fully-pickled model from ``modelPath``, runs every batch through
    it, and compares predicted class indices against one-hot encoded labels.

    Args:
        modelPath: Path to a model saved with ``torch.save(model, path)``.
        data_loader: Yields ``(data, label)`` pairs; ``data`` is unsqueezed to
            add a channel dim (B, C, L) -> (B, 1, C, L) before the forward
            pass, and ``label`` is one-hot, shape (B, num_classes).
            # assumes these shapes per the commented shape note in the
            # original code -- TODO confirm against the dataset classes

    Returns:
        Overall accuracy in [0, 1]; 0.0 when the loader yields no batches.
    """
    # Resolve the device locally (same expression as the module default) so
    # this function is self-contained.
    dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # map_location lets a GPU-saved checkpoint load on a CPU-only machine;
    # weights_only=False is required to unpickle a full nn.Module on
    # torch >= 2.6, where the default flipped to weights_only=True.
    model = torch.load(modelPath, map_location=dev, weights_only=False).to(dev)
    model.eval()
    total_correct = 0
    totalCount = 0
    with torch.no_grad():
        for data, label in data_loader:
            # Add the channel dimension expected by the Conv models.
            data = torch.unsqueeze(data.float(), dim=1).to(dev)
            label = label.float().to(dev)
            output = model(data)
            preds = output.argmax(dim=1)
            labels_indices = label.argmax(dim=1)  # one-hot -> class index
            total_correct += (preds == labels_indices).sum().item()
            totalCount += data.size(0)
    # Guard against an empty loader instead of dividing by zero.
    accuracy = total_correct / totalCount if totalCount else 0.0
    print("总体测试正确率:{}".format(accuracy))
    return accuracy


# The name matches pytest's default "test*" glob; mark it as not-a-test so
# accidental pytest collection of this module doesn't try to call it.
testModel.__test__ = False


def test(myConfig: MyConfig.MyConfig) -> None:
    """Evaluate the configured model on the machine-labelled 2022 dataset.

    Builds the multi-feature dataset described by ``myConfig``, wraps it in a
    DataLoader, and delegates accuracy computation to :func:`testModel`.

    Args:
        myConfig: Configuration providing the dataset root, model file path,
            channel selections, machine-data options and batch size.

    Raises:
        FileNotFoundError: If the dataset root or the model file is missing.
    """
    # `raise "<str>"` is a TypeError in Python 3 (string exceptions were
    # removed) -- raise a real exception class instead.
    if not os.path.exists(myConfig.modelDataSetRootPath):
        raise FileNotFoundError("rootPath doesn't exist")
    if not os.path.exists(myConfig.modelFilePath):
        raise FileNotFoundError("modelPath doesn't exist")

    modelPath = myConfig.modelFilePath
    fileRootPath = myConfig.modelDataSetRootPath

    datas = P3002022DatasetForMulti.Dataset2022ForMultiWithTargetMachine(
        myConfig.dataAChannel,
        fileRootPath,
        myConfig.machineDataFile,
        myConfig.dataBChannel,
        myConfig.machineDataLabel,
        myConfig.machineDataTargetWordDict,
        myConfig.machineDataCSVDelimiter,
    )

    # NOTE(review): drop_last=True silently discards the final partial batch
    # during evaluation, so up to batchSize-1 samples are never scored --
    # confirm this is intended.
    dataSet = DataLoader(datas, batch_size=myConfig.batchSize,
                         shuffle=False, drop_last=True)

    testModel(modelPath, dataSet)
