# coding: utf-8

'''
Verify whether the res_unet results are trustworthy.
'''

import json

import cv2
import nibabel as nib
import numpy as np
import torch
from pathlib2 import Path
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm

import utils.checkpoint as cp
from dataset import KiTS19
from dataset.transform import MedicalTransform
from network import ResUNet
from utils.vis import imshow
import os


def calc(seg, idx):
    """Compute the bounding box (ROI) of label ``idx`` in a segmentation volume.

    Args:
        seg: integer ndarray of shape (z, y, x) holding class labels per voxel.
        idx: label of interest. The bounding box covers voxels with value
            >= idx (``seg > idx - 1`` in the original code), while ``area``
            counts only voxels exactly equal to ``idx``.

    Returns:
        dict with keys ``min_x``/``min_y``/``min_z`` (inclusive),
        ``max_x``/``max_y``/``max_z`` (exclusive), ``area`` (voxel count of
        label idx) and ``slice`` (number of z-slices containing the label).

    Raises:
        ValueError: if no voxel in ``seg`` is >= idx.
    """
    seg = np.asarray(seg)
    # minlength guards against IndexError when the label never occurs;
    # in that case area is simply 0.
    bincount = np.bincount(seg.flatten(), minlength=idx + 1)
    area = int(bincount[idx])

    # Boolean volume of voxels belonging to class idx or higher.
    mask = seg >= idx

    slice_ = np.where(mask.any(axis=(1, 2)))[0]
    if len(slice_) == 0:
        raise ValueError('label {} not present in segmentation'.format(idx))
    num_slice = len(slice_)
    min_z = int(slice_.min())
    max_z = int(slice_.max()) + 1

    # Vectorized bounding box over rows (y) and columns (x). Equivalent to
    # the original per-slice cv2.boundingRect loop aggregated with min/max,
    # but computed in one pass without OpenCV.
    ys = np.where(mask.any(axis=(0, 2)))[0]
    xs = np.where(mask.any(axis=(0, 1)))[0]
    min_y, max_y = int(ys.min()), int(ys.max()) + 1
    min_x, max_x = int(xs.min()), int(xs.max()) + 1

    roi = {'min_x': min_x, 'min_y': min_y, 'min_z': min_z,
           'max_x': max_x, 'max_y': max_y, 'max_z': max_z,
           'area': area, 'slice': num_slice}

    return roi



def validdate_resnet(data_path, model_file, num_gpu, batch_size, num_workers):
    """Run a trained ResUNet over the KiTS19 train and valid splits and
    print a per-case Dice score plus the overall average.

    Args:
        data_path: root directory of the KiTS19 dataset.
        model_file: checkpoint file holding the network parameters.
        num_gpu: number of GPUs to spread inference over (DataParallel).
        batch_size: slices per forward pass.
        num_workers: DataLoader worker processes.
    """
    dataset = KiTS19(data_path, stack_num=5, spec_classes=[0, 1, 1], img_size=(512, 512),
                     use_roi=False, train_transform=None, valid_transform=None)
    net = ResUNet(in_ch=dataset.img_channels, out_ch=dataset.num_classes, base_ch=64)

    # Model initialization: load parameters on CPU, then wrap for multi-GPU inference.
    data = {'net': net}
    cp_file = Path(model_file)
    cp.load_params(data, cp_file, device='cpu')
    gpu_ids = [i for i in range(num_gpu)]
    torch.cuda.empty_cache()
    net = torch.nn.DataParallel(net, device_ids=gpu_ids).cuda()

    net.eval()
    torch.set_grad_enabled(False)

    dice_value = []

    def eval_split(subset, case_slice_indices):
        # Slices arrive in sequential order; accumulate them until a whole
        # case is available, then compute a global Dice for that case.
        # Shared by the train and valid splits (was copy-pasted twice).
        sampler = SequentialSampler(subset)
        data_loader = DataLoader(subset, batch_size=batch_size, sampler=sampler,
                                 num_workers=num_workers, pin_memory=True)
        case = 0
        vol_label = []
        vol_output = []
        for batch_idx, batch in enumerate(data_loader):
            imgs, labels, idx = batch['image'].cuda(), batch['label'], batch['index']
            outputs = net(imgs)
            outputs = outputs.argmax(dim=1)

            labels = labels.cpu().detach().numpy()
            outputs = outputs.cpu().detach().numpy()
            idx = idx.numpy()

            vol_label.append(labels)
            vol_output.append(outputs)

            # One batch may complete one or more cases; flush each finished case.
            while case < len(case_slice_indices) - 1 and idx[-1] >= case_slice_indices[case + 1] - 1:
                vol_output = np.concatenate(vol_output, axis=0)
                vol_label = np.concatenate(vol_label, axis=0)
                vol_num_slice = case_slice_indices[case + 1] - case_slice_indices[case]

                total_output = vol_output[:vol_num_slice]
                total_label = vol_label[:vol_num_slice]
                # Soft Dice with a small epsilon to avoid division by zero.
                dice = 2 * (total_output * total_label).sum() / (total_output.sum() + total_label.sum() + 0.0001)
                dice_value.append(dice)
                print(case, dice)

                # Keep the leftover slices belonging to the next case.
                vol_output = [vol_output[vol_num_slice:]]
                vol_label = [vol_label[vol_num_slice:]]
                case += 1

    eval_split(dataset.train_dataset, dataset.train_case_slice_indices)
    eval_split(dataset.valid_dataset, dataset.valid_case_slice_indices)

    print(dice_value)
    print(np.average(np.array(dice_value)))


def get_roi_from_resnet(data_path, model_file, num_gpu, batch_size, num_workers, roi_file):
    """Run a trained ResUNet over the KiTS19 dataset and export a per-case
    kidney ROI (bounding box of the predicted foreground) to a JSON file.

    For each case the predicted ROI is also compared with the ground-truth
    ROI and the per-side margins are printed (positive = the predicted box
    extends beyond the ground truth on that side).

    Args:
        data_path: root directory of the KiTS19 dataset.
        model_file: checkpoint file holding the network parameters.
        num_gpu: number of GPUs to use for inference (DataParallel).
        batch_size: slices per forward pass.
        num_workers: DataLoader worker processes.
        roi_file: path of the JSON file to write; it is rewritten after each
            case so a crash loses at most the current case.
    """
    dataset = KiTS19(data_path, stack_num=5, spec_classes=[0, 1, 1], img_size=(512, 512),
                     use_roi=False, train_transform=None, valid_transform=None)
    net = ResUNet(in_ch=dataset.img_channels, out_ch=dataset.num_classes, base_ch=64)

    # Model initialization: load parameters on CPU, then wrap for multi-GPU inference.
    data = {'net': net}
    cp_file = Path(model_file)
    cp.load_params(data, cp_file, device='cpu')
    gpu_ids = [i for i in range(num_gpu)]
    torch.cuda.empty_cache()
    net = torch.nn.DataParallel(net, device_ids=gpu_ids).cuda()

    net.eval()
    torch.set_grad_enabled(False)

    rois = {}

    def extract_split(subset, case_slice_indices, split):
        # Slices arrive in sequential order; accumulate them until a whole
        # case is available, then compute and record its ROI.
        # Shared by the train and valid splits (was copy-pasted twice).
        sampler = SequentialSampler(subset)
        data_loader = DataLoader(subset, batch_size=batch_size, sampler=sampler,
                                 num_workers=num_workers, pin_memory=True)
        case = 0
        vol_label = []
        vol_output = []
        for batch_idx, batch in enumerate(data_loader):
            imgs, labels, idx = batch['image'].cuda(), batch['label'], batch['index']
            outputs = net(imgs)
            outputs = outputs.argmax(dim=1)

            labels = labels.cpu().detach().numpy()
            outputs = outputs.cpu().detach().numpy()
            idx = idx.numpy()

            vol_label.append(labels)
            vol_output.append(outputs)

            # One batch may complete one or more cases; flush each finished case.
            while case < len(case_slice_indices) - 1 and idx[-1] >= case_slice_indices[case + 1] - 1:
                vol_output = np.concatenate(vol_output, axis=0)
                vol_label = np.concatenate(vol_label, axis=0)
                vol_num_slice = case_slice_indices[case + 1] - case_slice_indices[case]

                total_output = vol_output[:vol_num_slice]
                total_label = vol_label[:vol_num_slice]

                predict_roi = calc(total_output, idx=1)
                gt_roi = calc(total_label, idx=1)

                total_z, total_y, total_x = total_label.shape
                vol = {'total_x': total_x, 'total_y': total_y, 'total_z': total_z}

                case_roi = {'kidney': predict_roi, "vol": vol}
                case_id = dataset.case_idx_to_case_id(case, split)
                rois[f'case_{case_id:05d}'] = case_roi
                # Rewrite the whole JSON after every case (checkpointing).
                with open(roi_file, 'w') as f:
                    json.dump(rois, f, indent=4, separators=(',', ': '))

                # Per-side margins between predicted and ground-truth boxes.
                print(case_id, gt_roi["min_x"] - predict_roi["min_x"], predict_roi["max_x"] - gt_roi["max_x"],
                      gt_roi["min_y"] - predict_roi["min_y"], predict_roi["max_y"] - gt_roi["max_y"])

                # Keep the leftover slices belonging to the next case.
                vol_output = [vol_output[vol_num_slice:]]
                vol_label = [vol_label[vol_num_slice:]]
                case += 1

    extract_split(dataset.train_dataset, dataset.train_case_slice_indices, 'train')
    extract_split(dataset.valid_dataset, dataset.valid_case_slice_indices, 'valid')


def validdate_resunet_to_numpy(data_path, model_file, num_gpu, batch_size, num_workers, output_path):
    """Run a trained ResUNet over the KiTS19 train and valid splits, save each
    case's predicted volume as ``predict_XXXXX.npy`` and print per-case Dice
    scores plus the overall average.

    Args:
        data_path: root directory of the KiTS19 dataset.
        model_file: checkpoint file holding the network parameters.
        num_gpu: number of GPUs to use for inference (DataParallel).
        batch_size: slices per forward pass.
        num_workers: DataLoader worker processes.
        output_path: directory that receives the per-case .npy prediction files.
    """
    dataset = KiTS19(data_path, stack_num=5, spec_classes=[0, 1, 1], img_size=(512, 512),
                     use_roi=False, train_transform=None, valid_transform=None)
    net = ResUNet(in_ch=dataset.img_channels, out_ch=dataset.num_classes, base_ch=64)

    # Model initialization: load parameters on CPU, then wrap for multi-GPU inference.
    data = {'net': net}
    cp_file = Path(model_file)
    cp.load_params(data, cp_file, device='cpu')
    gpu_ids = [i for i in range(num_gpu)]
    torch.cuda.empty_cache()
    net = torch.nn.DataParallel(net, device_ids=gpu_ids).cuda()

    net.eval()
    torch.set_grad_enabled(False)

    dice_value = []

    def predict_split(subset, case_slice_indices, split):
        # Slices arrive in sequential order; accumulate them until a whole
        # case is available, then dump the prediction and score it.
        # Shared by the train and valid splits (was copy-pasted twice).
        sampler = SequentialSampler(subset)
        data_loader = DataLoader(subset, batch_size=batch_size, sampler=sampler,
                                 num_workers=num_workers, pin_memory=True)
        case = 0
        vol_label = []
        vol_output = []
        for batch_idx, batch in enumerate(data_loader):
            imgs, labels, idx = batch['image'].cuda(), batch['label'], batch['index']
            outputs = net(imgs)
            outputs = outputs.argmax(dim=1)

            labels = labels.cpu().detach().numpy()
            outputs = outputs.cpu().detach().numpy()
            idx = idx.numpy()

            vol_label.append(labels)
            vol_output.append(outputs)

            # One batch may complete one or more cases; flush each finished case.
            while case < len(case_slice_indices) - 1 and idx[-1] >= case_slice_indices[case + 1] - 1:
                vol_output = np.concatenate(vol_output, axis=0)
                vol_label = np.concatenate(vol_label, axis=0)
                vol_num_slice = case_slice_indices[case + 1] - case_slice_indices[case]

                total_output = vol_output[:vol_num_slice]
                total_label = vol_label[:vol_num_slice]

                case_id = dataset.case_idx_to_case_id(case, type=split)
                output_file = Path(output_path) / "predict_{}.npy".format(str(case_id).zfill(5))
                np.save(output_file, total_output)

                # Soft Dice with a small epsilon to avoid division by zero.
                dice = 2 * (total_output * total_label).sum() / (total_output.sum() + total_label.sum() + 0.0001)
                dice_value.append(dice)
                print(case, dice)

                # Keep the leftover slices belonging to the next case.
                vol_output = [vol_output[vol_num_slice:]]
                vol_label = [vol_label[vol_num_slice:]]
                case += 1

    predict_split(dataset.train_dataset, dataset.train_case_slice_indices, 'train')
    predict_split(dataset.valid_dataset, dataset.valid_case_slice_indices, 'valid')

    print(dice_value)
    print(np.average(np.array(dice_value)))



def validate_dice(origion_data_path="/datasets/3Dircadb/origion",
                  predict_data_path="/home/diaozhaoshuo/log/BeliefFunctionNN/3diradb_chengkung/output",
                  cases=(2, 6, 14, 16, 19)):
    """Compare binarized ground-truth masks with saved predictions and print
    the Dice score of each case.

    The previously hard-coded paths and case list are now parameters with the
    original values as backward-compatible defaults.

    Args:
        origion_data_path: directory containing ``case_XXXXX/segment.npy``.
        predict_data_path: directory containing ``predict_XXXXX.npy``.
        cases: iterable of integer case ids to evaluate.

    Returns:
        list of Dice scores, one per case (also printed, as before).
    """
    dice_values = []
    for i in cases:
        origion_mask_file = os.path.join(origion_data_path, "case_{}/segment.npy".format(str(i).zfill(5)))
        predict_mask_file = os.path.join(predict_data_path, "predict_{}.npy".format(str(i).zfill(5)))

        mask = np.load(origion_mask_file)
        # Collapse all foreground labels into a single binary mask.
        mask[mask >= 1] = 1
        predict = np.load(predict_mask_file)

        dice = 2 * (mask * predict).sum() / (mask.sum() + predict.sum())
        print(dice)
        dice_values.append(dice)
    return dice_values




if __name__ == '__main__':
    # Example (disabled): first dump per-case predictions to .npy files.
    # validdate_resunet_to_numpy(data_path="/datasets/3Dircadb/chengkung",
    #                  model_file="/home/diaozhaoshuo/log/BeliefFunctionNN/3diradb_chengkung/checkpoint/best.pth",
    #                  num_gpu=1, batch_size=3, num_workers=1,
    #                            output_path="/home/diaozhaoshuo/log/BeliefFunctionNN/3diradb_chengkung/output")

    validate_dice()