import os
import torch
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from loadDataset import P3002022DatasetForMulti
from modelSource import SepConv1D
# from modelSource import Multi_featureV2 as multi_person_feature
import plotext as plt
from loadDataset import LoadModelParam
# import datetime
import pandas as pd
import shutil
import glob
from modelSource import Focal_loss
import math
from config.myConfig import MyConfig


def trainAndValid(myConfig: MyConfig, model: torch.nn.Module, epoch: int, batchSize: int, excel: pd.DataFrame,
                  excel_lr: str,
                  excel_loss: str, device: torch.device, optim: torch.optim.Optimizer,
                  loss_fn: torch.nn.Module, data_loader: list):
    """Train and validate ``model`` for ``epoch`` epochs, logging per-epoch metrics.

    Args:
        myConfig: supplies ``modelSaveRootPath`` (checkpoint dir) and ``logFileDir``.
        model: network to train; assumed already moved to ``device`` by the caller.
        epoch: number of training epochs.
        batchSize: unused here (batching is fixed inside the DataLoaders); kept
            for interface compatibility with existing callers.
        excel: per-epoch validation accuracy (column ``excel_lr``) and loss
            (column ``excel_loss``) are written into this DataFrame in place.
        excel_lr: column name receiving the accuracy series.
        excel_loss: column name receiving the loss series.
        device: device onto which batches are moved.
        optim: optimizer stepping ``model``'s parameters.
        loss_fn: loss module; labels are cast to float (required e.g. by BCELoss).
        data_loader: ``[train_loader, val_loader]``.

    Side effects: saves one full-model checkpoint per epoch under
    ``<modelSaveRootPath>/tmp``, copies the best-accuracy checkpoint up to
    ``modelSaveRootPath``, and renders a loss/accuracy plot via plotext.
    """
    # makedirs(exist_ok=True) replaces the exists()/mkdir() pairs: shorter and
    # free of the check-then-create race.
    os.makedirs(myConfig.modelSaveRootPath, exist_ok=True)
    modelSavePath = myConfig.modelSaveRootPath
    modelTmpPath = os.path.join(myConfig.modelSaveRootPath, "tmp")
    os.makedirs(modelTmpPath, exist_ok=True)
    os.makedirs(myConfig.logFileDir, exist_ok=True)

    # Clear stale checkpoints from a previous run so the best-epoch copy below
    # cannot accidentally pick up an old file.
    for stale in glob.glob(os.path.join(modelTmpPath, "*.pth")):
        os.remove(stale)

    lossArray = [0.] * epoch
    accuracyArray = [0.] * epoch
    xAxis = range(epoch)

    for i in tqdm(range(epoch), desc="Training"):
        model.train()
        for data, label in data_loader[0]:
            data = data.float().to(device)
            label = label.float().to(device)  # loss_fn requires float labels
            output = model(data)
            loss = loss_fn(output, label)
            loss.backward()
            optim.step()
            optim.zero_grad()

        # Validation pass.
        total_test_loss = 0.0
        total_accuracy = 0
        totalCount = 0
        model.eval()
        with torch.no_grad():
            for data, label in data_loader[1]:
                data = data.float().to(device)
                label = label.float().to(device)
                output = model(data)
                # .item() converts to a Python float immediately, so lossArray
                # and the DataFrame never hold (possibly CUDA) tensors — the
                # original stored tensors and needed .cpu() when logging, and
                # would have handed GPU tensors to plotext below.
                total_test_loss += loss_fn(output, label).item()
                # Binary decision at the 0.5 threshold (model output is
                # presumably a sigmoid probability — consistent with BCELoss).
                pred = (output >= 0.5).float().squeeze()
                correct = (pred == label.squeeze()).sum().item()
                total_accuracy += correct
                totalCount += data.size(0)

        # Guard against an empty validation loader (division by zero).
        accuracy = total_accuracy / totalCount if totalCount else 0.0
        lossArray[i] = total_test_loss
        accuracyArray[i] = accuracy
        excel.loc[i, excel_lr] = accuracy
        excel.loc[i, excel_loss] = total_test_loss
        torch.save(model, os.path.join(modelTmpPath, "model_{}.pth".format(i)))

    # Keep the checkpoint from the most accurate epoch.
    maxAcc = max(range(epoch), key=accuracyArray.__getitem__)
    print("选取第{}次训练模型".format(maxAcc))
    shutil.copy(os.path.join(modelTmpPath, "model_{}.pth".format(maxAcc)), modelSavePath)

    plt.plot(xAxis, lossArray, label='Loss Line')
    plt.plot(xAxis, accuracyArray, label='Accuracy Line')
    plt.title('Validation Loss')
    plt.xlabel('X-axis')
    plt.ylabel('Y-axis')
    plt.show()
    plt.clf()


def train(myConfig: MyConfig):
    """Sweep several SGD learning rates over the P300 2022 dataset.

    For each learning rate a fresh ``SepConv1DNet`` is built, trained via
    ``trainAndValid``, and its per-epoch validation accuracy/loss are collected
    into one DataFrame, which is finally written to an Excel log file.

    Args:
        myConfig: project configuration; must provide ``modelDataSetRootPath``,
            ``modelSaveRootPath``, ``logFileDir``/``logFileName``, ``batchSize``,
            ``splitRate``, ``epoch`` and (optionally) ``modelFilePath``.

    Raises:
        Exception: if ``modelDataSetRootPath`` or ``modelSaveRootPath`` is None.
    """
    if myConfig.modelDataSetRootPath is None:
        raise Exception("rootPath doesn't exist")
    if not os.path.exists(myConfig.modelDataSetRootPath):
        print("创建目录：" + myConfig.modelDataSetRootPath)
        os.makedirs(myConfig.modelDataSetRootPath)
    if myConfig.modelSaveRootPath is None:
        raise Exception("modelSaveRootPath can't be None")
    if not os.path.exists(myConfig.modelSaveRootPath):
        print("创建目录：" + myConfig.modelSaveRootPath)
        os.makedirs(myConfig.modelSaveRootPath)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    fileRootPath: str = myConfig.modelDataSetRootPath
    logFile: str = os.path.join(myConfig.logFileDir, myConfig.logFileName)
    batchSize = myConfig.batchSize
    splitRate = myConfig.splitRate
    # Number of training epochs per learning rate.
    epoch = myConfig.epoch
    # BCELoss expects sigmoid outputs and float labels (see trainAndValid).
    # https://blog.csdn.net/BIgHAo1/article/details/121783011
    loss_fn = torch.nn.BCELoss()

    learning_rates = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001]
    momentoms = [0.9] * len(learning_rates)
    cols = []
    for x in learning_rates:
        cols.append("学习率:" + str(x))
        # The loss column name must be unique per learning rate: with the
        # original duplicate "Loss值" label, `excel.loc[i, "Loss值"]` assigned
        # every loss column at once, so all sweeps ended up sharing the loss
        # series of whichever learning rate was written last.
        cols.append("Loss值(学习率:" + str(x) + ")")

    # NOTE(review): channel indices (16, 19, 31) are hard-coded — presumably
    # the electrode subset used for this experiment; confirm against the
    # dataset definition.
    datas = P3002022DatasetForMulti.Dataset2022ForMulti((16, 19, 31), fileRootPath)

    dataSetSize = len(datas)
    train_size = int(dataSetSize * splitRate)
    val_size = dataSetSize - train_size  # remainder becomes the validation set

    # Random train/validation split.
    train_dataset, val_dataset = random_split(datas, [train_size, val_size])

    # pin_memory only helps (and is only meaningful) when loading onto CUDA.
    pin = device.type == 'cuda'
    train_loader = DataLoader(train_dataset, batch_size=batchSize, shuffle=True, pin_memory=pin)
    val_loader = DataLoader(val_dataset, batch_size=batchSize, shuffle=False, pin_memory=pin)

    excel: pd.DataFrame = pd.DataFrame(columns=cols)
    j = 0
    for i in range(len(learning_rates)):
        # A fresh model per learning rate so sweeps don't contaminate each other.
        model = LoadModelParam.LoadModelParam("" if myConfig.modelFilePath is None else myConfig.modelFilePath,
                                              SepConv1D.SepConv1DNet()).GetModel().to(device)
        learning_rate = learning_rates[i]
        momentom = momentoms[i]
        optim = torch.optim.SGD(model.parameters(), learning_rate, momentum=momentom)
        trainAndValid(myConfig, model, epoch, batchSize, excel, cols[j], cols[j + 1], device, optim,
                      loss_fn, [train_loader, val_loader])
        j += 2

    # Write the log to logFileDir/logFileName directly. The original joined
    # `logFile` (already a full path) under logFileDir/"log", producing a
    # broken path inside a directory that was never created.
    os.makedirs(myConfig.logFileDir, exist_ok=True)
    excel.to_excel(logFile)
