﻿from loadDataset import LoadBCICompDataSet
from other.Butterworth_filter import *
from modelSource import Multi_feature
import torch
import os

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# The 6x6 P300 speller matrix flattened row by row:
# 26 letters, digits 1-9, and underscore (36 symbols total).
ScreenLetter = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ123456789_")

# Load subject A's training session from the BCI Competition III
# (Wadsworth 2004) P300 dataset.
# NOTE(review): hard-coded absolute Windows path — adjust per machine.
subject_data_A = LoadBCICompDataSet.LoadBCICompDataSet(
    r"E:\文件\Documents\Python\大创\DataSet\BCI_Comp_III_Wads_2004\Subject_A_Train.mat")

# Make sure the checkpoint directory exists before training starts.
if not os.path.exists("../model"):
    os.mkdir("../model")

# Target-letter class indices; CrossEntropyLoss requires long-typed labels.
subject_target_At = torch.Tensor(subject_data_A.alphaTransfNumList()).to(torch.long)

# Per-character averaged EEG responses, converted to a float32 tensor.
subject_Response_A = subject_data_A.getTargetLetterAvgResponse()
subject_Response_At = torch.from_numpy(subject_Response_A).to(torch.float32)

# Classifier: single input channel, 36 output classes (one per speller symbol).
model = Multi_feature.multi_person_feature(1, 36).to(device)

# Multi-class cross-entropy loss.
loss_fn = torch.nn.CrossEntropyLoss()

# Plain SGD optimizer.
learning_rate = 0.1
optim = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Counter of completed evaluation passes.
total_test_step = 0
# Number of training epochs.
epoch = 100

# Train for `epoch` rounds: each round iterates over every averaged character
# epoch and fits the classifier to map a response window to one of the 36
# speller symbols, then re-evaluates loss/accuracy over the same set.
num_samples = subject_Response_At.shape[0]


def _prepare_sample(idx):
    """Build the model input and label for one character epoch.

    Slices time samples 54..131 of columns 6..11 from epoch `idx`, reshapes
    them to a single-example CNN input of shape (1, 1, 6, 78), and moves both
    tensors to the active device.
    """
    data = subject_Response_At[idx, 54:54 + 78, 6:6 + 6].reshape(1, 1, 78, 6).transpose(2, 3)
    # CrossEntropyLoss expects long labels of shape (batch,), here (1,).
    label = subject_target_At[idx].reshape(1)
    return data.to(device), label.to(device)


for i in range(epoch):
    print("----------第{}轮训练开始----------".format(i + 1))

    # ---- training pass ----
    model.train()
    for e in range(num_samples):
        optim.zero_grad()
        data, label = _prepare_sample(e)
        output = model(data)
        loss = loss_fn(output, label)
        loss.backward()
        optim.step()

    # ---- evaluation pass ----
    # NOTE(review): this evaluates on the same data used for training, so the
    # printed figures are training loss/accuracy, not held-out test scores.
    total_test_loss = 0.0
    total_accuracy = 0
    model.eval()
    with torch.no_grad():
        for e in range(num_samples):
            data, label = _prepare_sample(e)
            output = model(data)
            # .item() converts to plain Python numbers so no tensors are retained.
            total_test_loss += loss_fn(output, label).item()
            total_accuracy += (output.argmax(1) == label).sum().item()
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的Accuracy:{}".format(total_accuracy / num_samples))
    total_test_step += 1

    # Bug fix: checkpoints go into the "../model" directory created at startup;
    # the original wrote to "./model", which is never created and may not exist.
    torch.save(model, "../model/test_{}.pth".format(i))
    print("模型已保存")
