#!/usr/bin/python3
# these code is for ISIC 2018: Skin Lesion Analysis Towards Melanoma Detection
# -*- coding: utf-8 -*-
# @Author  : Ran Gu
import os, cv2
import torch
import torch.nn as nn
import math
import torch.utils.data as Data
import argparse
import numpy as np
from tqdm import tqdm
import sklearn.metrics as metrics
from sklearn.metrics import roc_auc_score

from distutils.version import LooseVersion


from utils.dataset import coronary_dataset
from utils.transform import coronary_transform


from Models.networks.network import Comprehensive_Atten_Unet

from utils.dice_loss import SoftDiceLoss, get_soft_label, val_dice_isic
from utils.dice_loss import Intersection_over_Union_isic

from utils.evaluation import AverageMeter
from utils.binary import assd
from torch.optim.lr_scheduler import StepLR

from utils.logger import setup_logger

# distributed set
distributed = False
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler


def calculate_Accuracy(confusion):
    """Compute segmentation metrics from a 2x2 confusion matrix.

    Rows are indexed by the first label set passed to the matrix builder,
    columns by the second (see the caller for which is pred vs. target).

    Returns:
        (meanIU, Acc, Se, Sp, IU) where IU is the per-class IoU array,
        Se/Sp treat class 1 as foreground and class 0 as background.
    """
    cm = np.asarray(confusion)
    true_pos = np.diag(cm).astype(np.float32)
    row_totals = np.sum(cm, 1).astype(np.float32)  # sum over each row
    col_totals = np.sum(cm, 0).astype(np.float32)  # sum over each column

    # Per-class intersection-over-union: TP / (union of both masks).
    IU = true_pos / (row_totals + col_totals - true_pos)
    meanIU = np.mean(IU)
    Acc = np.sum(true_pos) / np.sum(cm)
    Se = cm[1][1] / (cm[1][1] + cm[1][0])
    Sp = cm[0][0] / (cm[0][0] + cm[0][1])

    return meanIU, Acc, Se, Sp, IU


def train(train_loader, model, criterion, optimizer, args, epoch):
    """Run one training epoch and return the average loss.

    Args:
        train_loader: yields (image, label, name) batches.
        model: segmentation network already moved to GPU by the caller.
        criterion: soft-dice loss called as criterion(output, soft_target, num_classes).
        optimizer: optimizer stepping the model parameters.
        args: namespace providing num_classes, batch_size, local_rank.
        epoch: current epoch number (used for logging only).

    Returns:
        Average training loss over the epoch (float).
    """
    losses = AverageMeter()
    model.train()
    for step, (x, y, _) in tqdm(enumerate(train_loader), total=len(train_loader)):
        image = x.float().cuda()
        target = y.float().cuda()

        output = model(image)                                      # model output

        target_soft = get_soft_label(target, args.num_classes)     # get soft label
        loss = criterion(output, target_soft, args.num_classes)    # the dice losses
        # .item() instead of .data: avoids retaining a GPU tensor per batch.
        losses.update(loss.item(), image.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Fires only at step 0 (steps-per-epoch == ceil(len/batch)), main process only.
        if step % (math.ceil(float(len(train_loader.dataset)) / args.batch_size)) == 0 and args.local_rank == 0:
            msg = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(image), len(train_loader.dataset),
                100. * step / len(train_loader), losses.avg)
            print(msg)
            logger.info(msg)

    return losses.avg


def valid_coronary(valid_loader, model, criterion, optimizer, args, epoch, minloss):
    """Validate for one epoch; checkpoint the model when the loss improves.

    Args:
        valid_loader: yields (image, label, name) batches.
        model: segmentation network (switched to eval mode here).
        criterion: soft-dice loss called as criterion(output, soft_target, num_classes).
        optimizer: saved into the checkpoint so training can be resumed.
        args: namespace providing num_classes, batch_size, local_rank, ckpt, data.
        epoch: current epoch number.
        minloss: running list of best losses so far — mutated in place.

    Returns:
        (average validation loss, average dice score).
    """
    val_losses = AverageMeter()
    val_isic_dice = AverageMeter()

    model.eval()
    for step, (t, k, _) in tqdm(enumerate(valid_loader), total=len(valid_loader)):
        image = t.float().cuda()
        target = k.float().cuda()

        output = model(image)                                             # model output
        # Hard per-pixel prediction, then back to soft labels for the dice score.
        output_dis = torch.max(output, 1)[1].unsqueeze(dim=1)
        output_soft = get_soft_label(output_dis, args.num_classes)
        target_soft = get_soft_label(target, args.num_classes)            # get soft label

        val_loss = criterion(output, target_soft, args.num_classes)       # the dice losses
        # .item() instead of .data: minloss previously accumulated GPU tensors.
        val_losses.update(val_loss.item(), image.size(0))

        isic = val_dice_isic(output_soft, target_soft, args.num_classes)  # the dice score
        val_isic_dice.update(isic.item(), image.size(0))

        # Fires only at step 0 (steps-per-epoch == ceil(len/batch)), main process only.
        if step % (math.ceil(float(len(valid_loader.dataset)) / args.batch_size)) == 0 and args.local_rank == 0:
            msg = 'Valid Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(image), len(valid_loader.dataset),
                100. * step / len(valid_loader), val_losses.avg)
            print(msg)
            logger.info(msg)

    # Save a "best" checkpoint whenever this epoch beats every loss seen so far.
    if val_losses.avg < min(minloss):
        minloss.append(val_losses.avg)
        print(minloss)
        modelname = args.ckpt + '/' + 'min_loss' + '_' + args.data + '_checkpoint.pth.tar'
        print('the best model will be saved at {}'.format(modelname))
        state = {'epoch': epoch, 'state_dict': model.state_dict(), 'opt_dict': optimizer.state_dict()}
        torch.save(state, modelname)

    return val_losses.avg, val_isic_dice.avg


def test_coronary(test_loader, model, args=None):
    """Evaluate on the test set, save predictions, and report averaged metrics.

    For each batch: saves the binary prediction map as an image (batch size 1
    is assumed for the file name), then accumulates accuracy, sensitivity,
    specificity, AUC and per-class IoU across the whole set.

    Args:
        test_loader: yields (image, label, image_name) batches.
        model: trained segmentation network.
        args: accepted for call-site compatibility (main() passes it); unused.
    """
    Background_IOU = []
    Vessel_IOU = []
    ACC = []
    SE = []
    SP = []
    AUC = []

    model.eval()
    with torch.no_grad():
        for step, (img, lab, image_name) in enumerate(test_loader):

            print("step: ", step)
            image = img.float().cuda()

            output = model(image)

            # Compute the evaluation metrics.
            output = output.cpu().detach().numpy()
            output_01 = np.argmax(output, 1)            # (N, H, W) hard prediction
            output_save = output_01.transpose(1, 2, 0)
            # NOTE(review): hard-coded absolute output path — should come from args.
            save_path = "/home/handewei/project/segment/CA-Net/result/pred_filtered/" + image_name[0]
            os.makedirs(os.path.dirname(save_path), exist_ok=True)
            cv2.imwrite(save_path, output_save * 255)

            output_01 = output_01.reshape([-1])
            target_01 = lab.detach().numpy().astype(np.int64).reshape([-1])
            # sklearn expects (y_true, y_pred); the previous (pred, target) order
            # transposed the matrix, silently reporting precision as sensitivity.
            my_confusion = metrics.confusion_matrix(target_01, output_01).astype(np.float32)
            meanIU, Acc, Se, Sp, IU = calculate_Accuracy(my_confusion)

            Background_IOU.append(IU[0])
            Vessel_IOU.append(IU[1])
            ACC.append(Acc)
            SE.append(Se)
            SP.append(Sp)

            # AUC from the raw foreground channel. roc_auc_score is rank based;
            # NOTE(review): channel-1 logits alone are not guaranteed monotonic in
            # p(class 1) — consider softmaxing the output first. TODO confirm.
            y_pred = output[:, 1, :, :]
            y_pred = y_pred.reshape([-1])

            Auc = roc_auc_score(target_01, y_pred)
            AUC.append(Auc)

            print('Acc: {:.4f} | Se: {:.4f} | Sp: {:.4f} | Auc: {:.4f} | Background_IOU: {:.4f}, vessel_IOU: {:.4f}'
                  .format(Acc, Se, Sp, Auc, IU[0], IU[1]))

    print('avg_ Acc: {:.4f} | Se: {:.4f} | Sp: {:.4f} | Auc: {:.4f} | Background_IOU: {:.4f} |  vessel_IOU: {:.4f}'
          .format(np.average(ACC), np.average(SE), np.average(SP), np.average(AUC),
                  np.average(Background_IOU), np.average(Vessel_IOU)))
    logger.info('avg_ Acc: {:.4f} | Se: {:.4f} | Sp: {:.4f} | Auc: {:.4f} | Background_IOU: {:.4f} |  vessel_IOU: {:.4f}'
                .format(np.average(ACC), np.average(SE), np.average(SP), np.average(AUC),
                        np.average(Background_IOU), np.average(Vessel_IOU)))


def main(args):
    """Build data loaders, model and optimizer; train, validate, and test.

    Args:
        args: parsed command-line namespace (see the argparse setup below).
    """
    minloss = [1.0]
    start_epoch = args.start_epoch

    # loading the dataset
    print('loading the {0},{1},{2} dataset ...'.format('train', 'validation', 'test'))
    trainset = coronary_dataset(dataset_folder=args.root_path, train_type='train', transform=coronary_transform)
    validset = coronary_dataset(dataset_folder=args.root_path, train_type='val', transform=coronary_transform)
    testset = coronary_dataset(dataset_folder=args.root_path, train_type='test', transform=coronary_transform)

    if distributed:
        train_sampler = DistributedSampler(trainset)
        val_sampler = DistributedSampler(validset)
        test_sampler = DistributedSampler(testset)
    else:
        train_sampler = None
        val_sampler = None
        test_sampler = None

    trainloader = Data.DataLoader(dataset=trainset, batch_size=args.batch_size, num_workers=16, pin_memory=True, sampler=train_sampler)
    validloader = Data.DataLoader(dataset=validset, batch_size=args.batch_size, num_workers=16, pin_memory=True, sampler=val_sampler)
    testloader = Data.DataLoader(dataset=testset, batch_size=args.batch_size, num_workers=16, pin_memory=True, sampler=test_sampler)
    print('Loading is done\n')

    model = Comprehensive_Atten_Unet(args, args.num_input, args.num_classes)

    # Define optimizers and loss function
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr_rate,
                                 weight_decay=args.weight_decay)  # optimize all model parameters

    if torch.cuda.is_available():
        print('We can use', torch.cuda.device_count(), 'GPUs to train the network')
        model = model.cuda()

    if distributed:
        print("Let's use", torch.cuda.device_count(), "GPUs!")

        # Pin this process to its own GPU before wrapping the model in DDP.
        torch.cuda.set_device(args.local_rank)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)

    # Report the number of parameters in the network.
    print("------------------------------------------")
    print("Network Architecture of Model AttU_Net:")
    num_para = sum(param.numel() for param in model.parameters())
    print("Number of trainable parameters {0} in Model {1}".format(num_para, args.id))
    print("------------------------------------------")

    # criterion
    criterion = SoftDiceLoss()
    scheduler = StepLR(optimizer, step_size=256, gamma=0.5)

    # Optionally resume model/optimizer state from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> Loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['opt_dict'])
            print("=> Loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        else:
            print("=> No checkpoint found at '{}'".format(args.resume))

    print("Start training ...")
    for epoch in range(start_epoch + 1, args.epochs + 1):
        if train_sampler is not None:
            # DistributedSampler requires set_epoch() for a fresh shuffle each epoch.
            train_sampler.set_epoch(epoch)
        train_avg_loss = train(trainloader, model, criterion, optimizer, args, epoch)
        print("train_avg_loss: ", train_avg_loss)

        with torch.no_grad():
            val_avg_loss, val_coronary_dice = valid_coronary(validloader, model, criterion, optimizer, args, epoch, minloss)
            print("val_avg_loss, val_coronary_dice: ", val_avg_loss, val_coronary_dice)
            # Bug fix: extra positional args with no %-placeholders made logging
            # raise a formatting error; use lazy %-style placeholders instead.
            logger.info("val_avg_loss: %s, val_coronary_dice: %s", val_avg_loss, val_coronary_dice)

        scheduler.step()

        # Save periodic checkpoints once past the warm-up epochs.
        if epoch > args.particular_epoch:
            if epoch % args.save_epochs_steps == 0:
                filename = args.ckpt + '/' + str(epoch) + '_' + args.data + '_checkpoint.pth.tar'
                print('the model will be saved at {}'.format(filename))
                state = {'epoch': epoch, 'state_dict': model.state_dict(), 'opt_dict': optimizer.state_dict()}
                torch.save(state, filename)

    print('Training Done! Start testing')
    with torch.no_grad():
        # Bug fix: the original passed a third `args` argument that the
        # test_coronary signature did not accept (TypeError at test time).
        test_coronary(testloader, model)

    print('Testing Done!')
    logger.info('Testing Done!')


if __name__ == '__main__':
    # The training/eval code relies on the PyTorch 0.4+ merged Tensor/Variable API.
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), 'PyTorch>=0.4.0 is required'

    parser = argparse.ArgumentParser(description='Comprehensive attention network for biomedical Dataset')
    # Model related arguments
    parser.add_argument('--id', default='Comp_Atten_Unet',
                        help='a name for identitying the model. Choose from the following options: Unet')

    # Path related arguments
    parser.add_argument('--root_path', default='./data',
                        help='root directory of data')
    parser.add_argument('--ckpt', default='./saved_models',
                        help='folder to output checkpoints')

    # optimization related arguments
    parser.add_argument('--epochs', type=int, default=80, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--start_epoch', default=0, type=int,
                        help='epoch to start training. useful if continue from a checkpoint')
    parser.add_argument('--batch_size', type=int, default=4, metavar='N',
                        help='input batch size for training (default: 12)')
    parser.add_argument('--lr_rate', type=float, default=1e-4, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--num_input', default=3, type=int,
                        help='number of input image for each patient')
    parser.add_argument('--num_classes', default=2, type=int,
                        help='number of classes')
    parser.add_argument('--weight_decay', default=1e-8, type=float, help='weights regularizer')
    parser.add_argument('--particular_epoch', default=10, type=int,
                        help='after this number, we will save models more frequently')
    parser.add_argument('--save_epochs_steps', default=10, type=int,
                        help='frequency to save models after a particular number of epochs')
    parser.add_argument('--resume', default='',
                        help='the checkpoint that resumes from')

    # other arguments
    parser.add_argument('--data', default='coronary', help='choose the dataset')
    parser.add_argument('--out_size', default=(512, 512), help='the output image size')

    # DDP
    parser.add_argument("--local_rank", type=int, default=0)

    args = parser.parse_args()

    # log
    # Module-level `logger` is also read by train/valid_coronary/test_coronary above.
    logger = setup_logger("semantic_segmentation", "./log", 0, filename='{}_lr={}_{}_log.txt'.format(args.id, args.lr_rate, args.data))
    logger.info(args)

    # Only the main process prints the config and prepares the checkpoint folder.
    if args.local_rank == 0:
        print("Input arguments:")
        for key, value in vars(args).items():
            print("{:16} {}".format(key, value))

        args.ckpt = os.path.join(args.ckpt, args.data, args.id)
        print('Models are saved at %s' % (args.ckpt))

        if not os.path.isdir(args.ckpt):
            os.makedirs(args.ckpt)

        # When resuming past epoch 1, point --resume at the matching checkpoint file.
        if args.start_epoch > 1:
            args.resume = args.ckpt + '/' + str(args.start_epoch) + '_' + args.data + '_checkpoint.pth.tar'

    # DDP
    if distributed:
        # cudnn related setting
        cudnn.benchmark = True  # True can speed up runs with fixed input sizes
        cudnn.deterministic = True  # True avoids run-to-run nondeterministic results
        cudnn.enabled = True
        device = torch.device('cuda:{}'.format(args.local_rank))

        dist.init_process_group(backend="nccl")

        print("args.local_rank", args.local_rank)

    main(args)
