import csv
import datetime
import os
from glob import glob
from importlib import import_module

import numpy as np
import torch
from torch import optim
from torch.backends import cudnn
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from utils.Utils import start_tensorboard
from torch.utils.tensorboard import SummaryWriter

from models.EvaluationMetrics import DSC, Hausdorff_Distance_3D
from utils.LogUtil import my_logger
from utils.ShangJiaoVesselDataset import VesselData
from utils.ShangJiao_Utils import weights_init, debug_dataloader
from utils.Utils import print_GPU_memory_usage, serializeObject
from utils.DataAugmentation import CustomPreprocessModule
from utils.split_combine_mj import SplitComb
from utils.CustomDataLoader import ParseDataLoaderGenerator
from models.Loss import computeLoss
from utils.trainval_classifier import train_casenet, val_casenet


def _build_eval_loader(config, configure, phase):
    """Build a non-shuffled DataLoader for validation/testing.

    Uses the evaluation stride ``configure.stridev`` and, when set,
    the evaluation cube size ``configure.cubesizev`` (falling back to
    the training cube size otherwise).
    """
    # cubesizev overrides cubesize for evaluation patches when provided
    marginv = configure.cubesizev if configure.cubesizev is not None else configure.cubesize
    print('validation stride ', configure.stridev)
    split_comber = SplitComb(configure.stridev, marginv)
    dataset = VesselData(
        config,
        phase=phase,
        split_comber=split_comber,
        debug=configure.debug,
        random_select=False)
    return DataLoader(
        dataset,
        batch_size=configure.batch_size,
        shuffle=False,
        num_workers=configure.workers,
        pin_memory=True)


def _save_checkpoint(net, configure, path):
    """Save the network weights (moved to CPU) together with the run config."""
    state_dict = net.module.state_dict() if configure.multigpu else net.state_dict()
    # move every tensor to CPU so the checkpoint loads on any device
    state_dict = {key: value.cpu() for key, value in state_dict.items()}
    torch.save({'state_dict': state_dict, 'args': configure}, path)


def train_ShangJiao_UNet_3D(configure):
    """Train (or, depending on flags, test/debug) a 3D UNet on vessel data.

    Behaviour is selected by ``configure``:
      * ``configure.test``            -> run one evaluation pass on the test split and return
      * ``configure.debugval``        -> run one evaluation pass on the val split and return
      * ``configure.debugdataloader`` -> dump training batches for inspection and return
      * otherwise                     -> full training loop with periodic validation,
                                         checkpointing and npy/csv metric logs under
                                         ``results/<configure.save_dir>``.
    """
    torch.manual_seed(0)  # reproducible init / shuffling
    print('----------------------Load Model------------------------')
    model = import_module(configure.model)
    config, net = model.get_model(configure)
    start_epoch = configure.start_epoch
    save_dir = os.path.join('results', configure.save_dir)
    # BUG FIX: logging uses lazy %-formatting; the original passed the value as a
    # stray positional argument with no placeholder, so it was never rendered.
    my_logger.info("savedir: %s", save_dir)
    my_logger.info("args.lr: %s", configure.lr)

    if configure.resume:
        checkpoint = torch.load(configure.resume)
        if configure.resumepart:
            # load only the weights whose names/shapes match the current model
            net.load_state_dict(checkpoint['state_dict'], strict=False)
            print('part load Done')
        else:
            # load the full set of weight parameters
            net.load_state_dict(checkpoint['state_dict'])
            print("full resume Done")
    else:
        weights_init(net, init_type='xavier')  # weight initialization

    # last stage boundary doubles as the default number of epochs
    end_epoch = configure.lr_stage[-1] if configure.epochs is None else configure.epochs

    os.makedirs(save_dir, exist_ok=True)

    net = net.cuda()
    cudnn.benchmark = True

    # BUG FIX: the learning rate was hard-coded to 1e-3 (with an "args.lr" note);
    # use the configured value that is also logged above.
    if not configure.sgd:
        optimizer = optim.Adam(net.parameters(), lr=configure.lr)
    else:
        optimizer = optim.SGD(net.parameters(), lr=configure.lr, momentum=0.9)

    if configure.test:
        print('------------testing--------------------------')
        test_loader = _build_eval_loader(config, configure, 'test')
        print('start testing')
        val_casenet(start_epoch, net, test_loader, configure, save_dir, test_flag=True)
        return

    if configure.debugval:
        print('---------------------start debugging val----------------------')
        val_loader = _build_eval_loader(config, configure, 'val')
        val_casenet(start_epoch, net, val_loader, configure, save_dir, test_flag=False)
        return

    print('---------------------------------Load Dataset--------------------------------')
    margin = configure.cubesize
    print('patch size ', margin)
    print('train stride ', configure.stridet)
    split_comber = SplitComb(configure.stridet, margin)

    dataset_train = VesselData(
        config,
        phase='train',
        split_comber=split_comber,
        debug=configure.debug,
        random_select=configure.randsel)
    train_loader = DataLoader(
        dataset_train,
        batch_size=configure.batch_size,
        shuffle=True,
        num_workers=configure.workers,
        pin_memory=True)

    print('--------------------------------------')
    val_loader = _build_eval_loader(config, configure, 'val')
    print('--------------------------------------')

    if configure.debugdataloader and configure.debug:
        print('start debugging')
        testFolder = 'debug'
        os.makedirs(testFolder, exist_ok=True)
        debug_dataloader(train_loader, testFolder)
        return

    ##############################
    # start training
    ##############################

    total_epoch = []
    train_loss, val_loss, test_loss = [], [], []
    train_acc, val_acc, test_acc = [], [], []
    train_sensi, val_sensi, test_sensi = [], [], []
    dice_train, dice_val, dice_test = [], [], []
    ppv_train, ppv_val, ppv_test = [], [], []

    logdirpath = os.path.join(save_dir, 'log')
    os.makedirs(logdirpath, exist_ok=True)

    # Validation metrics keep their last computed value between val epochs;
    # the test pass is disabled below, so the test columns stay at zero and
    # are kept only for log-format compatibility.
    v_loss, mean_acc2, mean_sensiti2, mean_dice2, mean_ppv2 = 0, 0, 0, 0, 0
    te_loss, mean_acc3, mean_sensiti3, mean_dice3, mean_ppv3 = 0, 0, 0, 0, 0

    for epoch in range(start_epoch, end_epoch + 1):
        t_loss, mean_acc, mean_sensiti, mean_dice, mean_ppv = train_casenet(
            epoch, net, train_loader, optimizer, configure, save_dir)
        train_loss.append(t_loss)
        train_acc.append(mean_acc)
        train_sensi.append(mean_sensiti)
        dice_train.append(mean_dice)
        ppv_train.append(mean_ppv)

        # always keep the most recent weights
        _save_checkpoint(net, configure, os.path.join(save_dir, 'latest.ckpt'))

        # additionally keep a numbered snapshot every save_freq epochs
        if epoch % configure.save_freq == 0:
            _save_checkpoint(net, configure, os.path.join(save_dir, '%03d.ckpt' % epoch))

        if (epoch % configure.val_freq == 0) or (epoch == start_epoch):
            v_loss, mean_acc2, mean_sensiti2, mean_dice2, mean_ppv2 = val_casenet(
                epoch, net, val_loader, configure, save_dir, test_flag=False)

        # if epoch % args.test_freq == 0:
        # 	te_loss, mean_acc3, mean_sensiti3, mean_dice3, mean_ppv3 = val_casenet(epoch, net, test_loader, args, save_dir, test_flag=True)

        val_loss.append(v_loss)
        val_acc.append(mean_acc2)
        val_sensi.append(mean_sensiti2)
        dice_val.append(mean_dice2)
        ppv_val.append(mean_ppv2)

        test_loss.append(te_loss)
        test_acc.append(mean_acc3)
        test_sensi.append(mean_sensiti3)
        dice_test.append(mean_dice3)
        ppv_test.append(mean_ppv3)

        total_epoch.append(epoch)

        # overwrite the npy log every epoch so a crash loses nothing
        totalinfo = np.array([total_epoch, train_loss, val_loss, test_loss,
                              train_acc, val_acc, test_acc,
                              train_sensi, val_sensi, test_sensi,
                              dice_train, dice_val, dice_test,
                              ppv_train, ppv_val, ppv_test])
        np.save(os.path.join(logdirpath, 'log.npy'), totalinfo)

    logName = os.path.join(logdirpath, 'log.csv')
    # BUG FIX: the csv module requires newline='' to avoid blank rows on Windows
    with open(logName, 'a', newline='') as csvout:
        writer = csv.writer(csvout)
        writer.writerow(['train epoch', 'train loss', 'val loss', 'test loss',
                         'train acc', 'val acc', 'test acc',
                         'train sensi', 'val sensi', 'test sensi',
                         'dice train', 'dice val', 'dice test',
                         'ppv train', 'ppv val', 'ppv test'])
        for i in range(len(total_epoch)):
            writer.writerow([total_epoch[i], train_loss[i], val_loss[i], test_loss[i],
                             train_acc[i], val_acc[i], test_acc[i],
                             train_sensi[i], val_sensi[i], test_sensi[i],
                             dice_train[i], dice_val[i], dice_test[i],
                             ppv_train[i], ppv_val[i], ppv_test[i]])

    print("Done")
    return


def train_model(configure):
    """Run the main training loop for ``configure.model`` on CUDA.

    Reads all samples under ``configure.train_data_root_path``, trains for
    ``configure.training_epochs`` epochs with Adam + exponential LR decay,
    periodically validates (Dice), saves weight snapshots, and streams
    metrics to TensorBoard when ``configure.if_record_train_log`` is set.
    """
    my_logger.info("Initially allocated memory is " + str(torch.cuda.memory_allocated()))  # only torch tensors
    my_logger.info("The Max allocated memory is " + str(torch.cuda.max_memory_allocated()))

    current_weights_path = None
    train_logs_writer = None
    if not configure.if_debug and configure.if_record_train_log:
        # one timestamped directory per run, for both weights and logs
        start_time = datetime.datetime.now().strftime('%b%d-%H-%M-%S')
        model_name = configure.model.__str__().split("\n")[0].replace("(", "")
        dateset_name: str = os.path.basename(configure.train_data_root_path).split("_")[0]
        current_weights_path = os.path.join(configure.weight_root_dir,
                                            dateset_name + "_" + model_name + "_" + start_time)
        os.makedirs(current_weights_path, exist_ok=True)
        my_logger.info("The save path of model weights is: " + str(current_weights_path))
        run_logs_dir = os.path.join(configure.log_root_dir, dateset_name + "_" + model_name + "_" + start_time)
        os.makedirs(run_logs_dir, exist_ok=True)
        my_logger.info("The save path of run logs is: " + str(run_logs_dir))
        train_run_logs_dir = os.path.join(run_logs_dir, "train")
        os.makedirs(train_run_logs_dir, exist_ok=True)
        serializeObject(configure, train_run_logs_dir)
        train_logs_writer = SummaryWriter(log_dir=train_run_logs_dir)
        start_tensorboard(r"tensorboard --logdir=" + run_logs_dir)
        my_logger.info("Tensorboard start up...")

    device = torch.device('cuda')

    all_data_path_list = glob(os.path.join(configure.train_data_root_path, "*"))
    my_data_loader_generator = ParseDataLoaderGenerator(all_data_path_list, configure)

    learning_rate = configure.learning_rate
    epoch_num = configure.training_epochs

    # The model output is assumed to have gone through a softmax layer
    network = configure.model
    if configure.old_weights_dir is not None:
        network.load_state_dict(torch.load(configure.old_weights_dir))

    if configure.if_data_augmentation:
        # NOTE(review): currently unused — the augmentation call in the batch
        # loop below is commented out; kept so the switch still constructs it
        preprocessModule = CustomPreprocessModule(configure.patch_size, configure.aug_probability)

    if configure.if_cuda:
        network = network.to(device)
    param_total_nums = sum(p.numel() for p in network.parameters())
    my_logger.info("send the model to GPU memory...")
    my_logger.info("the total number of parameters is " + str(param_total_nums))
    print_GPU_memory_usage("create the model")

    optimizer = optim.Adam(network.parameters(), lr=learning_rate)
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

    global_step = 0

    for epoch in range(configure.old_epoch, epoch_num):
        epoch_start_time = datetime.datetime.now()
        for group_index in range(my_data_loader_generator.group_num):
            common_train_data_loader, valid_data_loader = my_data_loader_generator.get_data_loader()
            network.train()
            # invariant per loader — hoisted out of the per-batch loop
            configure.epoch_size = len(common_train_data_loader)
            configure.set_interval()

            # PyTorch dataloaders convert numpy arrays to tensors automatically
            for i, [train_data, label] in enumerate(common_train_data_loader):
                # if configure.if_data_augmentation:
                #     train_data, label = preprocessModule.process(train_data.numpy(), label.numpy(),
                #                                                  )
                #     train_data = torch.from_numpy(train_data)
                #     label = torch.from_numpy(label)
                if configure.if_cuda:
                    train_data = train_data.to(device, dtype=torch.float32, non_blocking=True)
                    label = label.to(device, dtype=torch.long, non_blocking=True)
                else:
                    train_data = train_data.type(torch.float32)
                    label = label.type(torch.long)

                optimizer.zero_grad()  # PyTorch accumulates gradients otherwise
                predict = network(train_data)  # .squeeze(0)
                loss = computeLoss(configure.loss_function, predict, label)
                loss.backward()  # compute gradients via back propagation
                optimizer.step()  # update the weights
                global_step += 1
                train_dice = DSC(predict, label, configure.threshold)
                # _, train_hd = Hausdorff_Distance_3D(predict, label)
                my_logger.info({
                    'epoch': epoch,
                    # "sample_index": str() + "/" + str(my_data_loader_generator.group_num),
                    'step': global_step,
                    'train loss': loss.item(),
                    'train accuracy': train_dice,
                    # 'train hausdorff distance': train_hd,
                })

                # BUG FIX: the two interval branches were swapped — the scheduler
                # stepped on save_model_interval and the checkpoint was written on
                # lr_schedule_interval, contradicting the comments attached to each.
                if not configure.if_debug and global_step % configure.lr_schedule_interval == 1:
                    # decay the learning rate; scheduler.step() must follow optimizer.step()
                    scheduler.step()
                    my_logger.info("Current learning rate is " + str(optimizer.state_dict()['param_groups'][0]['lr']))

                # BUG FIX: guard on current_weights_path — it is None when
                # if_record_train_log is False, and os.path.join(None, ...) raises.
                if (not configure.if_debug and current_weights_path is not None
                        and global_step % configure.save_model_interval == 1):
                    torch.save(network.state_dict(),
                               os.path.join(current_weights_path,
                                            "epoch_" + str(epoch) + "global_step_" + str(global_step)))

                if configure.if_valid and global_step % configure.valid_interval == 1:
                    # validation pass (no gradients, eval mode)
                    network.eval()
                    total_dice = 0
                    total_hd = 0
                    counter = 0
                    with torch.no_grad():
                        # renamed loop variable: the original shadowed the training loop's `i`
                        for _, [valid_img, valid_label] in enumerate(valid_data_loader):
                            valid_img = valid_img.to(device, dtype=torch.float32, non_blocking=True)
                            valid_label = valid_label.to(device, dtype=torch.float32, non_blocking=True)
                            predict = network(valid_img)  # .squeeze(0)
                            dice = DSC(predict, valid_label, configure.threshold)
                            # total_hd += Hausdorff_Distance_3D(predict, valid_label)[1]
                            total_dice += dice
                            counter += 1
                            # removed no-op `valid_img.cpu()` calls: Tensor.cpu()
                            # returns a copy and does not free the GPU tensor in place
                        my_logger.info({
                            'epoch': epoch,
                            'accuracy': total_dice / counter,
                            # 'hausdorff distance': total_hd / counter
                        })
                        print_GPU_memory_usage("a valid")
                        if not configure.if_debug and configure.if_record_train_log:
                            train_logs_writer.add_scalars("show", {
                                "valid accuracy": total_dice / counter,
                                # 'hausdorff distance': total_hd / counter,
                                "train accuracy": train_dice,
                                "train loss": loss.item(),
                                "learning rate": optimizer.state_dict()['param_groups'][0]['lr']
                            }, global_step)
                    network.train()
        epoch_end_time = datetime.datetime.now()
        my_logger.info("The duration time of " + str(epoch) + " is " + str(epoch_end_time - epoch_start_time))


if __name__ == '__main__':
    # allow duplicate OpenMP runtimes (common MKL/PyTorch clash on Windows)
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    # swap this import to train a different architecture
    from config.Parse2022_config import AttentionUNetConfigure as myConfigure

    try:
        train_model(myConfigure)
    except KeyboardInterrupt:
        # graceful exit on Ctrl-C
        print("catch the control-c order")