import datetime
import os
import random
import subprocess
from glob import glob

import numpy as np

from utils.Utils import start_tensorboard
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter

from models.BRAVENet import BRAVENet
from models.EvaluationMetrics import dice_coefficient_3D, DSC
from models.loss import computeLoss
from utils.CustomDataLoader import SingleDataLoader, SingleTransDataLoader
from utils.DataAugmentation import CustomPreprocessModule
from utils.LogUtil import my_logger
from utils.Utils import print_GPU_memory_usage, serializeObject, random_crop_3d, index_crop_3d


def train_model_backup(configure):
    """Legacy training loop (superseded by :func:`train_model` below).

    Trains ``configure.model`` on data served by :class:`SingleDataLoader`,
    stepping the LR scheduler, saving checkpoints and running validation at
    the step intervals configured on ``configure``.

    Args:
        configure: project configuration object exposing the model, data
            paths, optimizer/scheduler factories, interval settings and the
            ``if_*`` feature flags used throughout this function.

    Side effects:
        Writes checkpoints under ``configure.weight_root_dir`` and
        TensorBoard logs under ``configure.log_root_dir`` when run-logging
        is enabled; logs progress through ``my_logger``.
    """
    my_logger.info("Initially allocated memory is " + str(torch.cuda.memory_allocated()))  # only torch tensor
    my_logger.info("The Max allocated memory is " + str(torch.cuda.max_memory_allocated()))

    # Fix: previously these names were only bound when run-logging was on, so
    # the checkpoint-save / TensorBoard branches below raised NameError
    # whenever if_record_train_log was False (train_model already guards this).
    current_weights_path = None
    train_logs_writer = None
    if not configure.if_debug and configure.if_record_train_log:
        start_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        model_name = configure.model.__str__().split("\n")[0].replace("(", "")
        dateset_name: str = os.path.basename(configure.train_data_root_path).split("_")[0]
        current_weights_path = os.path.join(configure.weight_root_dir, dateset_name + model_name + start_time)
        # makedirs(exist_ok=True) also creates missing parent directories,
        # unlike the previous os.mkdir calls.
        os.makedirs(current_weights_path, exist_ok=True)
        run_logs_dir = os.path.join(configure.log_root_dir, dateset_name + model_name + start_time)
        os.makedirs(run_logs_dir, exist_ok=True)
        train_run_logs_dir = os.path.join(run_logs_dir, "train")
        os.makedirs(train_run_logs_dir, exist_ok=True)
        serializeObject(configure, train_run_logs_dir)
        train_logs_writer = SummaryWriter(log_dir=train_run_logs_dir)

    device = torch.device('cuda')

    all_data_path_list = glob(os.path.join(configure.train_data_root_path, "*"))
    if configure.if_debug:
        all_data_path_list = all_data_path_list[0:4]  # tiny subset for quick debugging

    my_data_loader_generator = SingleDataLoader(all_data_path_list, configure)

    save_model_interval = configure.save_model_interval
    valid_interval = configure.valid_interval
    lr_schedule_interval = configure.lr_schedule_interval

    learning_rate = configure.learning_rate
    epoch_num = configure.training_epochs

    # The result has gone through softmax layer
    network = configure.model
    if configure.old_weights_dir is not None:
        check_point = torch.load(configure.old_weights_dir)
        network.load_state_dict(check_point)

    if configure.if_data_augmentation:
        preprocessModule = CustomPreprocessModule(configure.patch_size, configure.aug_probability)

    if configure.if_cuda:
        network = network.to(device)
    param_total_nums = sum(p.numel() for p in network.parameters())
    my_logger.info("send the model to GPU memory...")
    my_logger.info("the total number of parameters is " + str(param_total_nums))
    print_GPU_memory_usage("create the model")

    optimizer = configure.optimizer(network.parameters(), lr=learning_rate)
    scheduler = configure.scheduler(optimizer, gamma=0.95)

    global_step = 0

    for epoch in range(configure.old_epoch, epoch_num):
        epoch_start_time = datetime.datetime.now()
        for group_index in range(my_data_loader_generator.group_num):
            common_train_data_loader, valid_data_loader = my_data_loader_generator.get_data_loader()
            network.train()
            big_train_data = None
            # Pytorch dataloader will convert the numpy array to tensor automatically
            for i, [train_data, label] in enumerate(common_train_data_loader):
                if configure.if_data_augmentation:
                    train_data, label = preprocessModule.process(train_data.numpy(), label.numpy())
                    big_train_data = torch.from_numpy(train_data)
                    big_label = torch.from_numpy(label)
                    # Crop a half-size patch out of the augmented volume.
                    train_data, index_list = random_crop_3d(big_train_data,
                                                            [int(s // 2) for s in configure.patch_size])
                    label = index_crop_3d(big_label, index_list)
                if configure.if_cuda:
                    train_data = train_data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.long)
                    if big_train_data is not None:
                        big_train_data = big_train_data.to(device, dtype=torch.float32)
                else:
                    train_data = train_data.type(torch.float32)
                    label = label.type(torch.long)

                optimizer.zero_grad()  # Because pytorch will accumulate the gradient
                predict = network(train_data)  # .squeeze(0)

                loss = computeLoss(configure.loss_function, predict, label, configure.loss_weights)

                loss.backward()  # compute the current gradient and back propagation
                optimizer.step()  # update the weights
                global_step += 1
                train_dice = dice_coefficient_3D(predict, label, configure.class_num)
                train_dsc = DSC(predict, label)
                # _, train_hd = Hausdorff_Distance_3D(predict, label)
                my_logger.info({
                    'epoch': epoch,
                    "group_index": str(group_index) + "/" + str(my_data_loader_generator.group_num),
                    "i": str(i) + "/" + str(len(common_train_data_loader)),
                    'step': global_step,
                    'train loss': loss.item(),
                    'train accuracy': train_dice.item(),
                    'train dsc': train_dsc,
                })

                if global_step % lr_schedule_interval == 1:
                    scheduler.step()
                    my_logger.info("Current learning rate is " + str(optimizer.state_dict()['param_groups'][0]['lr']))

                # Fix: guard on the save directory actually existing instead of
                # only `not if_debug`, which raised NameError when run-logging
                # (and therefore directory creation) was disabled.
                if current_weights_path is not None and global_step % save_model_interval == 1:
                    torch.save(network.state_dict(),
                               os.path.join(current_weights_path, "epoch_" + str(epoch)))

                if configure.if_valid and global_step % valid_interval == 1:
                    # Validation pass; no gradients are needed.
                    network.eval()
                    total_dice = 0
                    total_dsc = 0
                    counter = 0
                    with torch.no_grad():
                        # Fix: previously shadowed the outer loop index `i`.
                        for valid_img, valid_label in valid_data_loader:
                            valid_img = valid_img.to(device, dtype=torch.float32)
                            valid_label = valid_label.to(device, dtype=torch.float32)
                            # NOTE(review): here the model returns a pair while the
                            # training step above uses a single output — confirm
                            # which model variant this legacy path targets.
                            predict, _ = network(valid_img)  # .squeeze(0)
                            dice = dice_coefficient_3D(predict, valid_label, 1)
                            # total_hd += Hausdorff_Distance_3D(predict, valid_label)[1]
                            total_dsc += DSC(predict, valid_label)
                            total_dice += dice.item()
                            counter += 1
                        # Fix: an empty valid loader previously divided by zero.
                        if counter > 0:
                            my_logger.info({
                                'epoch': epoch,
                                'accuracy': total_dice / counter,
                                'dsc': total_dsc / counter
                            })
                            print_GPU_memory_usage("a valid")
                            if train_logs_writer is not None:
                                train_logs_writer.add_scalars("show", {
                                    "valid accuracy": total_dice / counter,
                                    'valid dsc': total_dsc / counter,
                                    "train accuracy": train_dice.item(),
                                    "train loss": loss.item(),
                                    "learning rate": optimizer.state_dict()['param_groups'][0]['lr']
                                }, global_step)
                    network.train()
            my_data_loader_generator.regenerate_data_loader()
        epoch_end_time = datetime.datetime.now()
        my_logger.info("The duration time of " + str(epoch) + " is " + str(epoch_end_time - epoch_start_time))


def train_model(configure):
    """Train ``configure.model`` with a random train/valid split.

    Splits the files under ``configure.train_data_root_path`` into train and
    validation sets according to ``configure.train_val_rate``, then runs the
    standard loop: forward, ``computeLoss``, backward, Adam step, with
    LR-scheduler stepping, checkpointing and validation at the intervals
    configured on ``configure``.

    Args:
        configure: project configuration object exposing the model, data
            paths, loss settings, interval settings and the ``if_*`` feature
            flags used throughout this function.

    Side effects:
        Writes checkpoints under ``configure.weight_root_dir`` and
        TensorBoard logs under ``configure.log_root_dir`` when run-logging
        is enabled (also launches TensorBoard); mutates
        ``configure.epoch_size`` via :meth:`set_interval`; logs progress
        through ``my_logger``.
    """
    my_logger.info("Initially allocated memory is " + str(torch.cuda.memory_allocated()))  # only torch tensor
    my_logger.info("The Max allocated memory is " + str(torch.cuda.max_memory_allocated()))

    current_weights_path = None
    train_logs_writer = None
    if not configure.if_debug and configure.if_record_train_log:
        start_time = datetime.datetime.now().strftime('%b%d-%H-%M-%S')
        model_name = configure.model.__str__().split("\n")[0].replace("(", "")
        dateset_name: str = os.path.basename(configure.train_data_root_path).split("_")[0]
        current_weights_path = os.path.join(configure.weight_root_dir,
                                            dateset_name + "_" + model_name + "_" + start_time)
        # makedirs(exist_ok=True) also creates missing parent directories.
        os.makedirs(current_weights_path, exist_ok=True)
        my_logger.info("The save path of model weights is: " + str(current_weights_path))
        run_logs_dir = os.path.join(configure.log_root_dir, dateset_name + "_" + model_name + "_" + start_time)
        os.makedirs(run_logs_dir, exist_ok=True)
        my_logger.info("The save path of run logs is: " + str(run_logs_dir))
        train_run_logs_dir = os.path.join(run_logs_dir, "train")
        os.makedirs(train_run_logs_dir, exist_ok=True)
        serializeObject(configure, train_run_logs_dir)
        train_logs_writer = SummaryWriter(log_dir=train_run_logs_dir)
        run_order = r"tensorboard --logdir=" + run_logs_dir
        start_tensorboard(run_order)
        my_logger.info("Tensorboard start up...")

    device = torch.device('cuda')

    all_data_path_list = glob(os.path.join(configure.train_data_root_path, "*"))
    if configure.if_debug:
        all_data_path_list = all_data_path_list[0:5]  # tiny subset for quick debugging

    # Random train/valid split at the file level.
    train_index_list = random.sample(range(0, len(all_data_path_list)),
                                     int(len(all_data_path_list) * configure.train_val_rate))
    valid_index_list = [i for i in range(0, len(all_data_path_list)) if
                        i not in train_index_list]

    train_generator = SingleTransDataLoader([all_data_path_list[i] for i in train_index_list],
                                            configure)
    train_data_loader = train_generator.get_data_loader()
    valid_data_loader = SingleTransDataLoader([all_data_path_list[i] for i in valid_index_list],
                                              configure).get_data_loader()

    # Fix: the loader length is loop-invariant, so configure the interval
    # sizes once up front instead of on every single batch.
    configure.epoch_size = len(train_data_loader)
    configure.set_interval()

    learning_rate = configure.learning_rate
    epoch_num = configure.training_epochs

    # The result has gone through softmax layer
    network = configure.model
    if configure.old_weights_dir is not None:
        check_point = torch.load(configure.old_weights_dir)
        network.load_state_dict(check_point)

    if configure.if_cuda:
        network = network.to(device)
    param_total_nums = sum(p.numel() for p in network.parameters())
    my_logger.info("send the model to GPU memory...")
    my_logger.info("the total number of parameters is " + str(param_total_nums))
    print_GPU_memory_usage("create the model")

    optimizer = optim.Adam(network.parameters(), lr=learning_rate)
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.99)

    global_step = 0

    for epoch in range(configure.old_epoch, epoch_num):
        epoch_start_time = datetime.datetime.now()
        for group_index in range(train_generator.group_num):
            network.train()
            # Pytorch dataloader will convert the numpy array to tensor automatically
            for i, data_dict in enumerate(train_data_loader):
                train_data = data_dict["train_img"]["data"]
                label = data_dict["label"]["data"]
                if configure.if_cuda:
                    train_data = train_data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.long)
                else:
                    train_data = train_data.type(torch.float32)
                    label = label.type(torch.long)

                optimizer.zero_grad()  # Because pytorch will accumulate the gradient
                predict = network(train_data)  # .squeeze(0)

                loss = computeLoss(configure.loss_function, predict, label)

                loss.backward()  # compute the current gradient and back propagation
                optimizer.step()  # update the weights
                global_step += 1
                train_dice = dice_coefficient_3D(predict, label, configure.class_num)
                train_dsc = DSC(predict, label, configure.threshold)
                # _, train_hd = Hausdorff_Distance_3D(predict, label)
                my_logger.info({
                    'epoch': epoch,
                    "group_index": str(group_index) + "/" + str(train_generator.group_num),
                    "i": str(i) + "/" + str(len(train_data_loader)),
                    'step': global_step,
                    'train loss': loss.item(),
                    'train accuracy': train_dice.item(),
                    'train dsc': train_dsc,
                })

                if global_step % configure.lr_schedule_interval == 1:
                    scheduler.step()
                    my_logger.info("Current learning rate is " + str(optimizer.state_dict()['param_groups'][0]['lr']))

                if not configure.if_debug and global_step % configure.save_model_interval == 1:
                    # scheduler should be applied after the optimizer's update
                    torch.save(network.state_dict(),
                               os.path.join(current_weights_path, "epoch_" + str(epoch)))

                if not configure.if_debug and configure.if_valid and global_step % configure.valid_interval == 1:
                    # Validation pass; no gradients are needed.
                    network.eval()
                    total_dice = 0
                    total_dsc = 0
                    counter = 0
                    with torch.no_grad():
                        # Fix: previously shadowed the outer loop index `i`.
                        for valid_data_dict in valid_data_loader:
                            valid_img = valid_data_dict["train_img"]["data"]
                            valid_label = valid_data_dict["label"]["data"]
                            valid_img = valid_img.to(device, dtype=torch.float32)
                            valid_label = valid_label.to(device, dtype=torch.float32)
                            predict = network(valid_img)  # .squeeze(0)
                            dice = dice_coefficient_3D(predict, valid_label, configure.class_num)
                            # total_hd += Hausdorff_Distance_3D(predict, valid_label)[1]
                            total_dsc += DSC(predict, valid_label, configure.threshold)
                            total_dice += dice.item()
                            counter += 1
                        # Fix: the random split can leave the valid set empty,
                        # which previously divided by zero here.
                        if counter > 0:
                            my_logger.info({
                                'epoch': epoch,
                                'accuracy': total_dice / counter,
                                'dsc': total_dsc / counter
                            })
                            print_GPU_memory_usage("a valid")
                            if train_logs_writer is not None:
                                train_logs_writer.add_scalars("show", {
                                    "valid accuracy": total_dice / counter,
                                    'valid dsc': total_dsc / counter,
                                    "train accuracy": train_dice.item(),
                                    "train loss": loss.item(),
                                    "learning rate": optimizer.state_dict()['param_groups'][0]['lr']
                                }, global_step)
                    network.train()
        epoch_end_time = datetime.datetime.now()
        my_logger.info("The duration time of " + str(epoch) + " is " + str(epoch_end_time - epoch_start_time))


if __name__ == '__main__':
    # NOTE(review): allows duplicate OpenMP runtimes to coexist — presumably a
    # workaround for the MKL/PyTorch libiomp clash on Windows; confirm it is
    # still needed in this environment.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    # Imported here (not at module top) so that merely importing this module
    # does not pull in the challenge-specific configuration.
    from config.XunFeiChallenge_config import UNet3DConfigure as myConfigure

    train_model(myConfigure)
