# -*- coding:utf-8 -*-
import argparse
import glob
import json
import os
import logging
import numpy as np

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from six.moves import xrange
import nibabel as nib


# Command-line interface for the evaluation run.
parser = argparse.ArgumentParser(description='PyTorch Thorax Testing')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-b', '--batch-size', default=10, type=int,
                    metavar='N', help='mini-batch size (default: 10)')

# Best validation loss seen so far (shared convention with the training
# script; declared `global` in main_test but not updated here).
best_loss = float('inf')

# Log to a file in the current working directory...
log_file = os.path.join("test_log.txt")
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', filename=log_file)

# ...and mirror every INFO record to the console as well.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logging.getLogger('').addHandler(console)


def main_test():
    """Evaluate a saved segmentation model on the validation volumes.

    For every validation JSON descriptor: load the per-slice NIfTI images,
    run the model slice by slice, reassemble the slices into a volume
    (sorted by the axial location encoded in each slice directory name),
    compute per-class Dice and overall foreground accuracy, and save the
    predicted segmentation and ground truth as NIfTI files.

    Side effects: reads `args` from the module-level parser, creates the
    'ckpts' and 'testResult' directories, writes NIfTI files and log lines.
    """
    global args, best_loss
    args = parser.parse_args()

    # BUG FIX: the original code unconditionally reloaded
    # 'model_epoch_10.pkl' after the resume branch, so a checkpoint given
    # via --resume was silently discarded.  Fall back to the default
    # snapshot only when no usable checkpoint was supplied.
    model = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            model = torch.load(args.resume)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    if model is None:
        model = torch.load('model_epoch_10.pkl', map_location=lambda storage, loc: storage)

    model = model.cuda()
    model = nn.DataParallel(model, device_ids=[0])
    cudnn.benchmark = True

    ckpts = 'ckpts'
    if not os.path.exists(ckpts):
        os.makedirs(ckpts)

    dice = []  # per-volume Dice vectors (length 6 each)
    acc = []   # per-volume foreground accuracy scalars

    jsonPaths = glob.glob('/home/maoshunyi/train_images/maskrcnn/seg_images/new_lung_image/axis/val_json/*.json')

    for i, i_jsonPath in enumerate(jsonPaths):
        all_location = []
        print(i_jsonPath)

        (fileDir, tempFileName) = os.path.split(i_jsonPath)
        (dirName, extension1) = os.path.splitext(tempFileName)

        # Each volume's slices live under newTestData2D/<case>/<loc>_*/.
        tiffDir = os.path.abspath('.') + '/newTestData2D/' + dirName
        print(tiffDir)
        niiPaths = glob.glob(tiffDir + '/*/img.nii.gz')
        targetPaths = glob.glob(tiffDir + '/*/gt.nii.gz')

        predProbMapsPerClass = []
        gt = []

        logging.info('-----time -----')

        for j in range(len(niiPaths)):
            imgPath = niiPaths[j]
            niiImg = nib.load(imgPath)
            niiTarget = nib.load(targetPaths[j])
            # np.asanyarray(img.dataobj) is the supported replacement for
            # the deprecated get_data() (removed in nibabel 5).
            x = np.asanyarray(niiImg.dataobj)
            label = np.asanyarray(niiTarget.dataobj)

            # The slice's axial position is the '<location>_' prefix of its
            # parent directory name; collected for sorting below.
            (imgsDir, tempImgName) = os.path.split(imgPath)
            tiffName = imgsDir.split('/')[-1]
            location = tiffName.split('_')[0]
            all_location.append(float(location))

            x = x[np.newaxis, :]  # add a batch axis for the model
            output = test(x, model)
            predProbMapsPerClass.append(output.data.cpu().numpy())
            gt.append(label)

        # Order slice indices by descending axial location so stacked
        # slices form a consistent volume.
        index = list(range(len(all_location)))
        index.sort(key=lambda s: -all_location[s])

        predProbMapsPerClass = np.array(predProbMapsPerClass)
        # Drop the per-slice batch axis: (S, 1, C, H, W) -> (S, C, H, W).
        predProbMapsPerClass = predProbMapsPerClass.reshape(
            predProbMapsPerClass.shape[0],
            predProbMapsPerClass.shape[2],
            predProbMapsPerClass.shape[3],
            predProbMapsPerClass.shape[4])

        predSegmentation = np.argmax(predProbMapsPerClass, axis=1)
        predSegmentation = predSegmentation[index]

        gt = np.array(gt)
        gt = gt[index]

        # Labels are already 0..5, so the original's five per-class copies
        # reduce to a plain dtype cast.
        seg = predSegmentation.astype(np.int16)
        print(predSegmentation.shape)

        dice_i = dice_evaluation(predSegmentation, gt)
        acc_i = acc_evaluation(predSegmentation, gt)

        dice.append(dice_i)
        acc.append(acc_i)

        # Renamed from `dir`, which shadowed the builtin.
        result_dir = 'testResult'
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        logging.info('--------------ACC: %f --------------------------' % acc_i)
        for c in range(6):
            logging.info('--------------Dice %d: %f --------------------------' % (c, dice_i[c]))
        logging.info('--------------saving segments %i ----------------' % (i))

        savefile_name = tiffDir.replace('TestData2D', 'testResult') + '/result.nii.gz'
        savefile_name2 = tiffDir.replace('TestData2D', 'testResult') + '/gt.nii.gz'

        (imgsDir, tempImgName) = os.path.split(savefile_name)
        if not os.path.exists(imgsDir):
            os.makedirs(imgsDir)
        # Identity affine: the saved volumes carry no spatial orientation.
        nib.save(nib.Nifti1Image(seg, np.eye(4)), savefile_name)
        nib.save(nib.Nifti1Image(gt, np.eye(4)), savefile_name2)

    mean_dice = np.mean(dice, axis=0)
    mean_acc = np.mean(acc, axis=0)

    logging.info('--------------Total ACC: %f ------------------------' % mean_acc)
    for c in range(6):
        logging.info('--------------Total Dice %d: %f ------------------------' % (c, mean_dice[c]))


def test(x, model):
    """Forward one batch of images and return per-class probability maps.

    Args:
        x: numpy array of images; a batch axis is expected (the caller
           adds one with np.newaxis before calling).
        model: the segmentation network, already on the GPU.

    Returns:
        A CUDA tensor of softmax probabilities over the 6 classes,
        shaped (N, 6, H, W).
    """
    model.eval()

    inputs = torch.from_numpy(np.array(x)).cuda()
    # Inference only: skip building the autograd graph.  The original used
    # the deprecated torch.autograd.Variable wrapper.
    with torch.no_grad():
        logits = model(inputs)

    # Softmax over the class channel.  The original flattened the tensor to
    # (N*H*W, 6) to call nn.Softmax() without a dim (deprecated implicit
    # dim) and hard-coded a 512x512 spatial size; softmax along dim=1 of
    # the (N, 6, H, W) logits is equivalent and shape-agnostic.  (Assumes
    # the model emits (N, 6, H, W) — the original's view(-1, 6, 512, 512)
    # relied on the same layout.)
    return torch.softmax(logits, dim=1)


def dice_evaluation(seg, gt):
    """Compute the per-class Dice coefficient for 6 label classes.

    Args:
        seg: predicted label volume (integer labels 0..5).
        gt: ground-truth label volume, same shape as `seg`.

    Returns:
        numpy array of 6 Dice scores, one per label.  A class absent from
        both volumes scores 1.0 by convention.
    """
    dice = np.empty(6)
    for label_i in range(6):
        pred_mask = (seg == label_i)
        gt_mask = (gt == label_i)
        # Vectorized intersection/size counts replace the original
        # O(voxels) pure-Python triple loop — same values, orders of
        # magnitude faster on real volumes.
        count = int(np.count_nonzero(pred_mask & gt_mask))
        total = int(np.count_nonzero(pred_mask)) + int(np.count_nonzero(gt_mask))
        print("Correct voxels: ", count)
        print("Predicted voxels: ", int(np.count_nonzero(pred_mask)))
        print("True voxels: ", int(np.count_nonzero(gt_mask)))
        if total == 0:
            # Neither prediction nor ground truth contains this label.
            dice[label_i] = 1
        else:
            dice[label_i] = (2 * count) / float(total)
        print("Dice", label_i, ": ", dice[label_i])
        print('\n')
    return dice



def acc_evaluation(seg, gt):
    """Compute foreground accuracy: correct non-background voxels over
    the number of voxels that are non-background in the ground truth
    plus the false positives on true background.

    Args:
        seg: predicted label volume.
        gt: ground-truth label volume, same shape as `seg`.

    Returns:
        float accuracy.  (As in the original, raises/returns a division
        warning if the denominator is zero, i.e. seg is all background
        AND gt is all background.)
    """
    s1 = np.asarray(seg)
    s2 = np.asarray(gt)
    # np.count_nonzero works for arrays of any rank; the original's
    # sum(sum(sum(...))) required exactly 3-D input and was far slower.
    background_matches = np.count_nonzero((s1 + s2) == 0)
    matches = np.count_nonzero(s1 == s2)
    denominator = s1.size - np.count_nonzero(s2 == 0)
    return (matches - background_matches) / denominator


# Run the full evaluation when executed as a script.
if __name__ == '__main__':
    main_test()

