import os
import torch
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from loadDataset import P3002022DatasetForMulti
from modelSource import Multi_featureV2_modify as multi_person_feature
# from modelSource import Multi_featureV2 as multi_person_feature
import plotext as plt
from loadDataset import LoadModelParam
# import datetime
import pandas as pd
import shutil
import glob


def trainAndValid(rootPath: str, model: torch.nn.Module, epoch: int, batchSize: int, excel: pd.Series,
                  device: torch.device, optim: torch.optim.Optimizer, loss_fn: torch.nn.CrossEntropyLoss,
                  data_loader: list):
    """Train `model` for `epoch` epochs, validating after each one.

    Per-epoch checkpoints go to `<rootPath>/model/tmp`; after training, the
    checkpoint with the highest validation accuracy is copied to
    `<rootPath>/model`. Validation loss/accuracy curves are plotted with
    plotext at the end.

    Args:
        rootPath: base directory; "model", "model/tmp" and "log" are created under it.
        model: network to train (saved whole via torch.save each epoch).
        epoch: number of training epochs.
        batchSize: kept for interface compatibility; accuracy now counts
            actual samples per batch rather than assuming full batches.
        excel: per-epoch accuracy is written into this Series at integer
            labels 0..epoch-1 (note: despite the old annotation this is a
            Series, not a DataFrame — the caller passes `excel[col]`).
        device: device to run on.
        optim: optimizer over `model.parameters()`.
        loss_fn: CrossEntropyLoss (labels are cast to long).
        data_loader: [train_loader, val_loader].

    Returns:
        The `excel` Series filled with per-epoch validation accuracy.
    """
    modelSavePath = os.path.join(rootPath, "model")
    modelTmpPath = os.path.join(rootPath, "model", "tmp")
    os.makedirs(modelTmpPath, exist_ok=True)
    os.makedirs(os.path.join(rootPath, "log"), exist_ok=True)

    # Remove stale per-epoch checkpoints left over from a previous run.
    for stale in glob.glob(os.path.join(modelTmpPath, "*.pth")):
        os.remove(stale)

    lossArray = [0.] * epoch
    accuracyArray = [0.] * epoch
    xAxis = range(epoch)

    for i in tqdm(range(epoch), desc="Training"):
        model.train()
        for data, label in data_loader[0]:
            # Inputs arrive as (batch, H, W); add the channel dim -> (batch, 1, H, W).
            data = torch.unsqueeze(data.float(), dim=1).to(device)
            label = label.long().to(device)  # CrossEntropyLoss requires Long targets
            output = model(data)
            loss = loss_fn(output, label)
            # BUG FIX: gradients were never zeroed, so they accumulated
            # across every batch and every epoch.
            optim.zero_grad()
            loss.backward()
            optim.step()

        model.eval()
        total_test_loss = 0.
        total_accuracy = 0
        totalCount = 0
        with torch.no_grad():
            for data, label in data_loader[1]:
                data = torch.unsqueeze(data.float(), dim=1).to(device)
                label = label.long().to(device)
                output = model(data)
                # .item() detaches the scalar; summing raw tensors kept
                # them (and their device memory) alive for the whole epoch.
                total_test_loss += loss_fn(output, label).item()
                total_accuracy += (output.argmax(1) == label).sum().item()
                # BUG FIX: the final batch may be smaller than batchSize;
                # count the samples actually seen.
                totalCount += label.size(0)

        lossArray[i] = total_test_loss
        accuracyArray[i] = total_accuracy / totalCount
        excel[i] = total_accuracy / totalCount
        torch.save(model, os.path.join(modelTmpPath, "model_{}.pth".format(i)))

    # BUG FIX: the old loop stored the *index* into the running-max variable
    # (`maxAcc = index`), so the copied checkpoint was rarely the best one.
    bestEpoch = max(range(epoch), key=lambda k: accuracyArray[k])
    shutil.copy(os.path.join(modelTmpPath, "model_{}.pth".format(bestEpoch)), modelSavePath)

    # Plot both curves on one chart (plotext renders in the terminal).
    plt.plot(xAxis, lossArray, label='Loss Line')
    plt.plot(xAxis, accuracyArray, label='Accuracy Line')
    plt.title('Validation Loss')
    plt.xlabel('X-axis')
    plt.ylabel('Y-axis')
    plt.show()
    return excel


def train(rootPath: str, modelPath: str = ""):
    """Run a learning-rate sweep over the 2022 P300 multi-person dataset.

    Loads the dataset from `<rootPath>/newData`, splits it 90/10 into
    train/validation, then for each learning rate in `cols` trains for 100
    epochs via trainAndValid and records per-epoch validation accuracy in
    one Excel column per learning rate, written to
    `<rootPath>/log/train_newData_modify_model.xlsx`.

    Args:
        rootPath: base directory containing "newData"; results land in "log".
        modelPath: optional path to a .pth checkpoint to warm-start from;
            empty string means train from scratch.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    fileRootPath: str = os.path.join(rootPath, "newData")
    logFile: str = "train_newData_modify_model.xlsx"
    model = LoadModelParam.LoadModelParam("" if modelPath == "" else modelPath,
                                          multi_person_feature.multi_person_feature(1, 2)).GetModel().to(device)
    batchSize = 64
    splitRate = 0.9
    epoch = 100
    loss_fn = torch.nn.CrossEntropyLoss()

    # Column names double as the learning rates (parsed with float()).
    cols = ["0.1", "0.01", "0.001", "0.0001", "0.00001", "0.000001"]
    # NOTE(review): 0.009 appears twice (indices 2 and 3) — looks like a typo
    # for 0.0009 at one of them; confirm the intended momentum schedule.
    momentoms = [0.9, 0.09, 0.009, 0.009, 0.0009, 0.00009]

    datas = P3002022DatasetForMulti.Dataset2022ForMulti((29, 30, 31), fileRootPath)

    # Random 90/10 train/validation split.
    dataSetSize = len(datas)
    train_size = int(dataSetSize * splitRate)
    val_size = dataSetSize - train_size
    train_dataset, val_dataset = random_split(datas, [train_size, val_size])

    train_loader = DataLoader(train_dataset, batch_size=batchSize, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batchSize, shuffle=False)

    excel: pd.DataFrame = pd.DataFrame(columns=cols)
    for col, momentom in zip(cols, momentoms):
        learning_rate = float(col)
        optim = torch.optim.SGD(model.parameters(), learning_rate, momentum=momentom)
        # NOTE(review): the same model instance is reused across the sweep,
        # so each learning rate continues from the previous run's weights
        # rather than a fresh initialization — confirm this is intended.
        excel[col] = trainAndValid(rootPath, model, epoch, batchSize, excel[col].copy(), device, optim, loss_fn,
                                   [train_loader, val_loader]).copy()

    excel.to_excel(os.path.join(rootPath, "log", logFile))
