import json
import os
import time

import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from torch import optim
from torch.utils.data import DataLoader
from tqdm import tqdm

from utils.dataset.RVSCdataset import Dataset
from utils.dataset.params import RvscParams
from utils.evaluation.evaluationUtils import f_score, dsc
from utils.train.imageUtils import deeplab_dataset_collate, gray2rgb
from utils.train.loss import CE_Label_Loss, DiceLoss, LossHistory, CE_Loss


def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    Returns ``None`` when the optimizer has no parameter groups.
    """
    return next((group['lr'] for group in optimizer.param_groups), None)


def fit_one_epoch(model, num_classes, dice_loss, optimizer, loss_history, epoch,
                  epoch_size, epoch_size_val, gen, gen_val, interval_epoch, cuda, log_dir):
    """Run one training epoch and one validation pass, then checkpoint.

    Args:
        model: network to optimise (put into train mode here).
        num_classes: number of segmentation classes (used by the CE loss).
        dice_loss: truthy -> DiceLoss on one-hot labels; falsy ->
            CE_Label_Loss on the integer masks.
        optimizer: optimizer stepping ``model``'s parameters.
        loss_history: history object; per-epoch mean losses are appended.
        epoch: zero-based epoch index (displayed as ``epoch + 1``).
        epoch_size: number of training batches consumed from ``gen``.
        epoch_size_val: number of validation batches consumed from ``gen_val``.
        gen / gen_val: iterables yielding (images, integer masks, one-hot
            labels) batches -- presumably numpy arrays from the collate fn;
            TODO confirm.
        interval_epoch: total epoch count (progress display only).
        cuda: move batches to the GPU when True.
        log_dir: path prefix for the saved ``.pth`` checkpoint.
    """
    net = model.train()
    total_loss = 0
    total_f_score = 0

    val_total_loss = 0  # renamed from the original typo `val_toal_loss`
    val_total_f_score = 0
    start_time = time.time()
    with tqdm(total=epoch_size, desc=f'Epoch {epoch + 1}/{interval_epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_size:
                break
            imgs, pngs, labels = batch

            with torch.no_grad():
                # torch.as_tensor avoids the extra copy (and the UserWarning)
                # torch.tensor() incurs when the batch is already a tensor.
                imgs = torch.as_tensor(imgs, dtype=torch.float)
                pngs = torch.as_tensor(pngs).long()
                labels = torch.as_tensor(labels, dtype=torch.float)
                if cuda:
                    imgs = imgs.cuda()
                    pngs = pngs.cuda()
                    labels = labels.cuda()

            optimizer.zero_grad()
            outputs = net(imgs)

            if dice_loss:
                loss = DiceLoss(outputs, labels)
            else:
                loss = CE_Label_Loss(outputs, pngs, num_classes=num_classes)

            with torch.no_grad():
                # f-score is monitoring-only; no gradient required.
                _f_score = f_score(outputs, labels)

            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            total_f_score += _f_score.item()

            waste_time = time.time() - start_time
            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                'f_score': total_f_score / (iteration + 1),
                                's/step': waste_time,
                                'lr': get_lr(optimizer)})
            pbar.update(1)

            start_time = time.time()

    print('Start Validation')
    with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{interval_epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= epoch_size_val:
                break
            imgs, pngs, labels = batch
            with torch.no_grad():
                imgs = torch.as_tensor(imgs, dtype=torch.float)
                pngs = torch.as_tensor(pngs).long()
                labels = torch.as_tensor(labels, dtype=torch.float)
                if cuda:
                    imgs = imgs.cuda()
                    pngs = pngs.cuda()
                    labels = labels.cuda()

                outputs = net(imgs)

                if dice_loss:
                    val_loss = DiceLoss(outputs, labels)
                else:
                    val_loss = CE_Label_Loss(outputs, pngs, num_classes=num_classes)

                # f-score on the validation batch (monitoring only).
                _f_score = f_score(outputs, labels)

                val_total_loss += val_loss.item()
                val_total_f_score += _f_score.item()

            pbar.set_postfix(**{'total_loss': val_total_loss / (iteration + 1),
                                'f_score': val_total_f_score / (iteration + 1),
                                'lr': get_lr(optimizer)})
            pbar.update(1)

    # Bug fix: exactly `epoch_size` batches were accumulated, so divide by
    # that (the original divided by epoch_size + 1, understating the mean).
    # max(1, ...) guards against an empty loader.
    mean_train_loss = total_loss / max(1, epoch_size)
    mean_val_loss = val_total_loss / max(1, epoch_size_val)
    loss_history.append_loss(mean_train_loss, mean_val_loss)
    print('Finish Validation')
    print('Epoch:' + str(epoch + 1) + '/' + str(interval_epoch))
    print('Total Loss: %.4f || Val Loss: %.4f ' % (mean_train_loss, mean_val_loss))

    print('Saving state, iter:', str(epoch + 1))
    torch.save(model.state_dict(), log_dir + 'Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' % (
        (epoch + 1), mean_train_loss, mean_val_loss))


def fit_one_epoch2(net, device, args):
    """Train ``net`` for ``args.epochs`` epochs with per-epoch validation.

    Each epoch runs a "train" phase and a "valid" phase. After validation
    the mean dice score is computed over the collected predictions; whenever
    it beats the best seen so far, the weights and sample detections are
    saved under ``args.log_dir``.

    Args:
        net: segmentation network (moved to ``device`` here).
        device: torch device to run on.
        args: namespace providing batch_size, lr, loss_function, num_classes,
            best_validation_dsc, log_dir, init_epoch, epochs, workers and the
            dataset paths consumed by ``data_loaders``.
    """
    net.to(device)

    loader_train, loader_valid = data_loaders(args)
    loaders = {"train": loader_train, "valid": loader_valid}

    epoch_size_train = loader_train.dataset.len // args.batch_size
    epoch_size_val = loader_valid.dataset.len // args.batch_size

    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    # Select the criterion by name; an unrecognised name leaves cLoss as
    # None and fails at the first call, exactly like the original code.
    cLoss = None
    if args.loss_function == 'DiceLoss':
        cLoss = DiceLoss()
    elif args.loss_function == 'CE_Loss':
        cLoss = CE_Loss(args.num_classes, device)

    best_validation_dsc = args.best_validation_dsc
    loss_history = LossHistory(args.log_dir)

    start_time = time.time()
    for epoch in range(args.init_epoch, args.epochs):
        # Bug fix: reset the accumulators every epoch. They previously kept
        # growing across epochs while being divided by per-epoch iteration
        # counts, so every reported average after epoch 1 was inflated.
        total_loss = 0
        val_total_loss = 0  # renamed from the original typo `val_toal_loss`

        for phase in ["train", "valid"]:
            if phase == "train":
                net.train()
                epoch_size = epoch_size_train
            else:
                net.eval()
                print('Start Validation')
                epoch_size = epoch_size_val

            with tqdm(total=epoch_size, desc=f'Epoch {epoch + 1}/{args.epochs}', postfix=dict,
                      mininterval=0.3) as pbar:

                validation_pred = []
                validation_true = []

                for iteration, batch in enumerate(loaders[phase]):
                    imgs, masks, labels = batch

                    # as_tensor avoids the copy (and UserWarning) that
                    # torch.tensor() incurs when the collate function
                    # already returns tensors.
                    imgs = torch.as_tensor(imgs, dtype=torch.float).to(device)
                    masks = torch.as_tensor(masks, dtype=torch.int64).to(device)
                    labels = torch.as_tensor(labels, dtype=torch.float).to(device)

                    optimizer.zero_grad()

                    # Gradients are needed only in the training phase.
                    with torch.set_grad_enabled(phase == "train"):
                        y_pred = net(imgs)

                        loss = cLoss(y_pred, masks)

                        if phase == "valid":
                            val_total_loss += loss.item()

                            # Collect per-sample predictions and ground truth
                            # for the post-epoch dice computation.
                            y_pred_np = y_pred.detach().cpu().numpy()
                            validation_pred.extend(
                                [y_pred_np[s] for s in range(y_pred_np.shape[0])]
                            )
                            y_true_np = labels.detach().cpu().numpy()
                            validation_true.extend(
                                [y_true_np[s] for s in range(y_true_np.shape[0])]
                            )

                            pbar.set_postfix(**{'total_loss': val_total_loss / (iteration + 1),
                                                'lr': get_lr(optimizer)})
                            pbar.update(1)

                        if phase == "train":
                            loss.backward()
                            optimizer.step()
                            total_loss += loss.item()

                            waste_time = time.time() - start_time
                            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                                's/step': waste_time,
                                                'lr': get_lr(optimizer)})
                            pbar.update(1)

                            start_time = time.time()

                if phase == "valid":
                    # Bug fix: divide by the batch count, not count + 1;
                    # max(1, ...) guards an empty loader.
                    mean_train_loss = total_loss / max(1, epoch_size_train)
                    mean_val_loss = val_total_loss / max(1, epoch_size_val)
                    loss_history.append_loss(mean_train_loss, mean_val_loss)

                    mean_dsc = np.mean(
                        dsc_per_volume(
                            validation_pred,
                            validation_true,
                            args.num_classes
                        )
                    )

                    print('Finish Validation')
                    print('Epoch:' + str(epoch + 1) + '/' + str(args.epochs))
                    print('Total Loss: %.4f || Val Loss: %.4f || mean_dsc: %.4f' % (
                        mean_train_loss, mean_val_loss, float(mean_dsc)))

                    # Checkpoint only on improvement of the validation dice.
                    if mean_dsc > best_validation_dsc:
                        best_validation_dsc = mean_dsc
                        print('Saving state, iter:', str(epoch + 1))
                        modelName = args.log_dir + '/Epoch%d-mean_dsc%.4f.pth' % ((epoch + 1), float(mean_dsc))
                        torch.save(net.state_dict(), modelName)
                        saveDetectedImage(validation_pred, args.num_classes)


def pretrain(model, model_path):
    """Partially load checkpoint weights from ``model_path`` into ``model``.

    Only checkpoint entries whose key exists in the model AND whose tensor
    shape matches are copied; everything else is silently skipped, so a
    checkpoint from a slightly different architecture can still initialise
    the matching layers.

    Args:
        model: torch module whose state dict is updated in place.
        model_path: path to a ``.pth`` state-dict checkpoint.
    """
    print('Loading weights into state dict...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_dict = model.state_dict()
    pretrained_dict = torch.load(model_path, map_location=device)
    # Bug fix: also require `k in model_dict` -- a checkpoint key absent from
    # the model previously raised KeyError instead of being skipped.
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('Finished!')


def data_loaders(args):
    """Build the (train, valid) DataLoaders described by ``args``.

    Training shuffles and drops the last partial batch; validation keeps
    every sample. Both use the project's deeplab collate function.
    """
    dataset_train, dataset_valid = datasets(args)

    shared_kwargs = dict(
        batch_size=args.batch_size,
        num_workers=args.workers,
        collate_fn=deeplab_dataset_collate,
    )
    loader_train = DataLoader(dataset=dataset_train, shuffle=True,
                              drop_last=True, **shared_kwargs)
    loader_valid = DataLoader(dataset=dataset_valid,
                              drop_last=False, **shared_kwargs)

    return loader_train, loader_valid


def datasets(args):
    """Create the (train, valid) RVSC datasets.

    Training combines the train set with test set 1; validation uses
    test set 2. Each split gets its own normalisation statistics.
    """
    shared_kwargs = dict(image_size=args.inputs_size, mask_type=args.mask_type)
    train = Dataset(data_dir=[args.trainDataset_path, args.testDataset_path1],
                    img_norm_mean=RvscParams.TR_T1_IMG_MEAN.value,
                    img_norm_std=RvscParams.TR_T1_IMG_STD.value,
                    **shared_kwargs)
    valid = Dataset(data_dir=[args.testDataset_path2],
                    img_norm_mean=RvscParams.TEST2_IMG_MEAN.value,
                    img_norm_std=RvscParams.TEST2_IMG_STD.value,
                    **shared_kwargs)
    return train, valid


def dsc_per_volume(validation_pred, validation_true, num_classes):
    """Return the dice coefficient for each (prediction, ground-truth) pair.

    Predictions are squeezed of singleton dimensions before scoring; the two
    input lists are expected to be the same length.
    """
    return [
        dsc(np.array(pred).squeeze(), np.array(true), num_classes)
        for pred, true in zip(validation_pred, validation_true)
    ]


def makedirs(args):
    """Ensure every output directory exists (no-op when already present)."""
    for directory in (args.log_dir, args.logs, '../detected_images'):
        os.makedirs(directory, exist_ok=True)


def snapshotargs(args):
    """Write the parsed command-line arguments to ``<args.logs>/args.json``."""
    snapshot_path = os.path.join(args.logs, "args.json")
    with open(snapshot_path, "w") as fp:
        json.dump(vars(args), fp)


def saveDetectedImage(validation_preds, num_classes):
    """Save up to 10 evenly spaced predicted masks as PNGs for inspection.

    Args:
        validation_preds: list of per-sample prediction arrays; element
            ``[0]`` is assumed to be the mask channel gray2rgb expects --
            TODO confirm against the caller's layout.
        num_classes: unused here; kept for interface compatibility.
    """
    if not validation_preds:
        return
    # Stride through the list; at least 1 so short lists still work
    # (the original stride of len // 10 became 0 for < 10 predictions).
    n = max(1, len(validation_preds) // 10)
    for i in range(min(10, len(validation_preds))):
        # Bug fix: the index was (i - 1) * n, which started at the *end* of
        # the list (negative index at i == 0) and never reached the last
        # tenth of the predictions.
        image = gray2rgb(np.uint8(validation_preds[i * n][0]))
        plt.figure()
        plt.imshow(image)
        plt.savefig('../detected_images/' + str(i) + '.png')
        plt.close()  # free the figure; matplotlib otherwise keeps it alive
