import os
import torch
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from modelSource import Multi_featureV2 as multi_person_feature
from loadDataset import BCICompDataSetForMultiV2
# from modelSource import Multi_featureV2 as multi_person_feature
import plotext as plt
from loadDataset import LoadModelParam
import datetime


def train(rootPath: str, modelPath: str = "",
          subjectAPath: str = r"/home/MyDataSet/SubjectA",
          subjectBPath: str = r"/home/MyDataSet/SubjectB"):
    """Train the multi-person-feature model on the BCI competition dataset.

    Args:
        rootPath: Directory under which ``model/`` (checkpoints) and ``log/``
            (text records) subdirectories are created.
        modelPath: Optional path to a ``.pth`` checkpoint to resume from;
            an empty string starts from freshly initialized weights.
        subjectAPath: Root directory of subject A's data (default keeps the
            original hard-coded location).
        subjectBPath: Root directory of subject B's data.

    Side effects: writes one checkpoint per epoch into ``rootPath/model``,
    appends per-epoch loss/accuracy to a timestamped file in ``rootPath/log``,
    and renders a loss/accuracy plot in the terminal via plotext.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LoadModelParam.LoadModelParam("" if modelPath == "" else modelPath,
                                          multi_person_feature.multi_person_feature(1, 2)).GetModel().to(device)
    batchSize = 64
    # Loss function
    loss_fn = torch.nn.CrossEntropyLoss()
    # Optimizer hyper-parameters
    learning_rate = 0.01
    momentom = 0.9

    splitRate = 0.9  # fraction of the dataset used for training
    optim = torch.optim.SGD(model.parameters(), learning_rate, momentum=momentom)

    # makedirs(exist_ok=True) is race-free, unlike the exists()/mkdir() pair
    modelSavePath = os.path.join(rootPath, "model")
    os.makedirs(modelSavePath, exist_ok=True)
    logPath = os.path.join(rootPath, "log")
    os.makedirs(logPath, exist_ok=True)

    datas = BCICompDataSetForMultiV2.BCICompDatasetForMulti((61, 62, 63), subjectAPath,
                                                            subjectBPath, False, True)

    dataSetSize = len(datas)
    train_size = int(dataSetSize * splitRate)
    val_size = dataSetSize - train_size  # remainder becomes the validation set

    # Randomly split into training and validation sets
    train_dataset, val_dataset = random_split(datas, [train_size, val_size])

    # DataLoaders: shuffle the training set only
    train_loader = DataLoader(train_dataset, batch_size=batchSize, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batchSize, shuffle=False)

    # Number of training epochs
    epoch = 100

    # Timestamp formatted without ':' so the filename is valid on all platforms;
    # os.path.join (not string concatenation) keeps the file inside log/.
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    recordPath = os.path.join(logPath, timestamp + "record.txt")

    lossArray = [0.] * epoch
    accuracyArray = [0.] * epoch
    xAxis = range(epoch)

    with open(recordPath, "w") as recordFile:
        for i in tqdm(range(epoch), desc="Training"):
            model.train()
            for data, label in train_loader:
                # Add a channel dimension: (N, H, W) -> (N, 1, H, W)
                data = torch.unsqueeze(data.float(), dim=1).to(device)
                label = label.long().to(device)
                output = model(data)
                loss = loss_fn(output, label)  # labels must be Long for CrossEntropyLoss
                optim.zero_grad()  # BUGFIX: without this, gradients accumulate across batches
                loss.backward()
                optim.step()

            total_test_loss = 0.0
            total_accuracy = 0
            totalCount = 0
            model.eval()
            with torch.no_grad():
                for data, label in val_loader:
                    data = torch.unsqueeze(data.float(), dim=1).to(device)
                    label = label.long().to(device)
                    output = model(data)
                    loss = loss_fn(output, label)
                    # .item() detaches to a Python float (was accumulating tensors)
                    total_test_loss += loss.item()
                    total_accuracy += (output.argmax(1) == label).sum().item()
                    # label.size(0): the final batch may be smaller than batchSize
                    totalCount += label.size(0)

            lossArray[i] = total_test_loss
            accuracyArray[i] = total_accuracy / totalCount
            recordFile.write("Epoch：{}，整体测试集上的Loss:{}\n".format(i, total_test_loss))
            recordFile.write("Epoch：{}，整体测试集上的Accuracy:{}\n".format(i, total_accuracy / totalCount))
            recordFile.write("-------------------------------------\n")
            # One full-model checkpoint per epoch, accuracy embedded in the name
            torch.save(model, os.path.join(modelSavePath, "test_{}_{:5f}.pth".format(i, total_accuracy / totalCount)))

    # Terminal plot of validation loss and accuracy over epochs
    plt.plot(xAxis, lossArray, label='Loss Line')
    plt.plot(xAxis, accuracyArray, label='Accuracy Line')
    plt.title('Validation Loss')
    plt.xlabel('X-axis')
    plt.ylabel('Y-axis')
    plt.show()
