import datetime
import os
from glob import glob

import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter

from models.EvaluationMetrics import Dice
from utils.CustomDataLoader import KiPA2022DataLoaderGenerator
from utils.LogUtil import my_logger
from utils.Utils import print_GPU_memory_usage, serializeObject
from utils.Utils import start_tensorboard
from models.Loss import computeLoss


def train_model(configure):
    """Run the full training loop for the segmentation model in ``configure``.

    ``configure`` is a project config object expected to provide (at least)
    the model, dataset root paths, learning rate, epoch range, the interval
    settings, and the ``if_debug`` / ``if_record_train_log`` / ``if_valid`` /
    ``if_cuda`` switches referenced below.

    Side effects: creates checkpoint/log directories, writes model
    checkpoints and tensorboard scalars (unless debugging), and reports
    progress via ``my_logger``. Requires a CUDA device when
    ``configure.if_cuda`` is set.
    """
    my_logger.info("Initially allocated memory is " + str(torch.cuda.memory_allocated()))  # only torch tensor
    my_logger.info("The Max allocated memory is " + str(torch.cuda.max_memory_allocated()))

    current_weights_path = None
    train_logs_writer = None
    if not configure.if_debug and configure.if_record_train_log:
        start_time = datetime.datetime.now().strftime('%b%d-%H-%M-%S')
        # First line of the model repr (class name) without the "(".
        model_name = configure.model.__str__().split("\n")[0].replace("(", "")
        dateset_name: str = os.path.basename(configure.train_data_root_path).split("_")[0]
        current_weights_path = os.path.join(configure.weight_root_dir,
                                            dateset_name + "_" + model_name + "_" + start_time)
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original exists()/mkdir() pairs.
        os.makedirs(current_weights_path, exist_ok=True)
        my_logger.info("The save path of model weights is: " + str(current_weights_path))
        run_logs_dir = os.path.join(configure.log_root_dir, dateset_name + "_" + model_name + "_" + start_time)
        os.makedirs(run_logs_dir, exist_ok=True)
        my_logger.info("The save path of run logs is: " + str(run_logs_dir))
        train_run_logs_dir = os.path.join(run_logs_dir, "train")
        os.makedirs(train_run_logs_dir, exist_ok=True)
        serializeObject(configure, train_run_logs_dir)
        train_logs_writer = SummaryWriter(log_dir=train_run_logs_dir)
        run_order = r"tensorboard --logdir=" + run_logs_dir
        start_tensorboard(run_order)
        my_logger.info("Tensorboard start up...")

    device = torch.device('cuda')

    all_data_path_list = glob(os.path.join(configure.train_data_root_path, "*"))
    my_data_loader_generator = KiPA2022DataLoaderGenerator(all_data_path_list, configure)
    common_train_data_loader, valid_data_loader = my_data_loader_generator.get_data_loader()

    learning_rate = configure.learning_rate
    epoch_num = configure.training_epochs

    # The result has gone through softmax layer
    network = configure.model
    if configure.old_weights_dir is not None:
        # Resume from an earlier checkpoint (a plain state_dict file).
        check_point = torch.load(configure.old_weights_dir)
        network.load_state_dict(check_point)

    if configure.if_cuda:
        network = network.to(device)
    param_total_nums = sum(p.numel() for p in network.parameters())
    my_logger.info("send the model to GPU memory...")
    my_logger.info("the total number of parameters is " + str(param_total_nums))
    print_GPU_memory_usage("create the model")

    optimizer = optim.Adam(network.parameters(), lr=learning_rate)
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

    dice_computer = Dice(configure.class_num, threshold=configure.threshold)

    global_step = 0

    for epoch in range(configure.old_epoch, epoch_num):
        epoch_start_time = datetime.datetime.now()
        network.train()
        configure.epoch_size = len(common_train_data_loader)
        configure.set_interval()
        # Pytorch dataloader will convert the numpy array to tensor automatically
        for batch_index, [train_data, label] in enumerate(common_train_data_loader):

            if configure.if_cuda:
                train_data = train_data.to(device, dtype=torch.float32, non_blocking=True)
                label = label.to(device, dtype=torch.long, non_blocking=True)
            else:
                train_data = train_data.type(torch.float32)
                label = label.type(torch.long)

            optimizer.zero_grad()  # Because pytorch will accumulate the gradient
            predict = network(train_data)  # .squeeze(0)
            loss = computeLoss(configure.loss_function, predict, label.float())
            loss.backward()  # compute the current gradient and back propagation
            optimizer.step()  # update the weights
            global_step += 1
            train_dice, train_dice_list = dice_computer(predict, label)

            # _, train_hd = Hausdorff_Distance_3D(predict, label)
            my_logger.info({
                'epoch': epoch,
                'step': global_step,
                'train loss': loss.item(),
                'train dice': train_dice.item(),
                'train dice list': [d.item() for d in train_dice_list]
                # 'train hausdorff distance': train_hd,
            })

            # BUG FIX: these two interval checks were swapped in the original
            # code — the LR scheduler stepped on save_model_interval and
            # checkpoints were written on lr_schedule_interval. The intervals
            # now match their names. (The "% interval == 1" phase — firing on
            # step 1, interval+1, ... — is kept as-is.)
            if not configure.if_debug and global_step % configure.lr_schedule_interval == 1:
                # Modify the learning rate; the scheduler must be applied
                # after the optimizer's update.
                scheduler.step()
                my_logger.info("Current learning rate is " + str(optimizer.state_dict()['param_groups'][0]['lr']))

            if (not configure.if_debug and current_weights_path is not None
                    and global_step % configure.save_model_interval == 1):
                # current_weights_path is only prepared when train logs are
                # recorded; skip checkpointing otherwise instead of crashing
                # on os.path.join(None, ...).
                torch.save(network.state_dict(),
                           os.path.join(current_weights_path,
                                        "epoch_" + str(epoch) + "global_step_" + str(global_step)))

            if configure.if_valid and global_step % configure.valid_interval == 1:
                # Validation pass over the whole valid set.
                network.eval()
                total_avg_dice = 0
                # One accumulator per foreground class (background excluded).
                valid_dice_list = [0 for _ in range(configure.class_num - 1)]
                total_hd = 0
                counter = 0
                with torch.no_grad():
                    for valid_img, valid_label in valid_data_loader:
                        valid_img = valid_img.to(device, dtype=torch.float32, non_blocking=True)
                        valid_label = valid_label.to(device, dtype=torch.float32, non_blocking=True)
                        valid_predict = network(valid_img)  # .squeeze(0)
                        # BUG FIX: use distinct names here — the original
                        # reused `dice_list`, so the "train ..." tensorboard
                        # scalars below actually logged the last VALIDATION
                        # batch's per-class dice instead of the train values.
                        avg_dice, batch_dice_list = dice_computer(valid_predict, valid_label)
                        for class_index in range(len(valid_dice_list)):
                            valid_dice_list[class_index] += batch_dice_list[class_index].item()
                        # total_hd += Hausdorff_Distance_3D(valid_predict, valid_label)[1]
                        total_avg_dice += avg_dice.item()
                        counter += 1
                        # NOTE: the original called valid_img.cpu() /
                        # valid_label.cpu() here and discarded the result —
                        # a no-op, removed.
                    for class_index in range(len(valid_dice_list)):
                        valid_dice_list[class_index] /= counter
                    my_logger.info({
                        'epoch': epoch,
                        'total_avg_dice': total_avg_dice / counter,
                        'dice_list': valid_dice_list
                        # 'hausdorff distance': total_hd / counter
                    })
                    print_GPU_memory_usage("a valid")
                    if not configure.if_debug and configure.if_record_train_log:
                        # NOTE(review): the fixed class labels below assume
                        # class_num >= 5 (4 foreground classes) — confirm
                        # against the KiPA2022 config.
                        train_logs_writer.add_scalars("show", {
                            "valid dice": total_avg_dice / counter,
                            # 'hausdorff distance': total_hd / counter,
                            "train dice": train_dice.item(),
                            "train loss": loss.item(),
                            "train renal vein": train_dice_list[0],
                            "train kidney": train_dice_list[1],
                            "train renal artery": train_dice_list[2],
                            "train tumor": train_dice_list[3],
                            "valid renal vein": valid_dice_list[0],
                            "valid kidney": valid_dice_list[1],
                            "valid renal artery": valid_dice_list[2],
                            "valid tumor": valid_dice_list[3],
                            "learning rate": optimizer.state_dict()['param_groups'][0]['lr']
                        }, global_step)
                # Back to training mode after the eval pass.
                network.train()
        epoch_end_time = datetime.datetime.now()
        my_logger.info("The duration time of " + str(epoch) + " is " + str(epoch_end_time - epoch_start_time))


if __name__ == '__main__':
    # Workaround for "OMP: Error #15" when multiple OpenMP runtimes get
    # loaded into one process (common with MKL + PyTorch on Windows).
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # Import is local so that switching the experiment (e.g. VNetConfigure,
    # UNet3DConfigure) only requires changing this single line; the previously
    # commented-out alternative imports/calls were removed as dead code.
    from config.KiPA2022_config import ModifiedUNet3DConfigure as myConfigure

    try:
        train_model(myConfigure)
    except KeyboardInterrupt:
        # Allow a clean manual stop (Ctrl-C) without a traceback.
        print("catch the control-c order")
