# coding = utf-8

'''
Evaluate UNet segmentation results produced with ROI (region-of-interest) cropping.
'''

import sys
import cc3d

import click
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib2 import Path
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm

import utils.checkpoint as cp
from dataset import KiTS19
from dataset.transform import MedicalTransform
from loss import GeneralizedDiceLoss
from loss.util import class2one_hot
from network.unet import UNet
from utils.metrics import Evaluator
from utils.vis import imshow
import cv2


def post_processing(vol):
    """Suppress small spurious connected components in a predicted label volume.

    The overall largest component is discarded as background (assumed, see
    note below); among the remaining foreground components, any one smaller
    than 10% of the largest foreground component is zeroed out.

    Args:
        vol: integer label volume (numpy array); modified in place.

    Returns:
        The same volume with small components removed.
    """
    binary = vol.copy()
    binary[binary > 0] = 1
    vol_cc = cc3d.connected_components(binary)

    # Count voxels per component label in a single pass. The previous
    # implementation rescanned the whole volume once per label
    # (O(num_labels * num_voxels)).
    labels, counts = np.unique(vol_cc, return_counts=True)
    cc_sum = sorted(zip(labels.tolist(), counts.tolist()),
                    key=lambda x: x[1], reverse=True)
    # NOTE(review): this assumes background is always the single largest
    # component — confirm this holds for very large foregrounds.
    cc_sum.pop(0)  # remove background

    # Keep the largest foreground component; drop anything below 10% of it.
    threshold = cc_sum[0][1] * 0.1 if cc_sum else 0
    reduce_cc = [label for label, size in cc_sum[1:] if size < threshold]
    for label in reduce_cc:
        vol[vol_cc == label] = 0

    return vol

def reverse_transform(vol, roi, dataset, transform):
    """Map a predicted ROI-cropped volume back into full-scan coordinates.

    Undoes the forward ROI pipeline slice by slice: strips the padding that
    centered the resized crop, resizes the slice back to the ROI's original
    pixel extent, pastes it at the ROI position inside a full-size frame,
    and finally pre/appends empty slices along z so the output covers the
    whole scan.

    Args:
        vol: predicted label volume covering only the ROI z-slices.
        roi: dict with 'kidney' (ROI bounds per axis) and 'vol'
            (total_x/total_y/total_z of the original scan).
        dataset: provides `roi_error_range`, the z-axis margin.
        transform: provides `roi_error_range` (x/y margin) and
            `output_size` used by the forward transform.

    Returns:
        np.ndarray of shape (total_z, total_y, total_x).
    """
    # ROI bounds expanded by the error margin, clamped to the scan extent.
    min_x = max(0, roi['kidney']['min_x'] - transform.roi_error_range)
    max_x = min(vol.shape[-1], roi['kidney']['max_x'] + transform.roi_error_range)
    min_y = max(0, roi['kidney']['min_y'] - transform.roi_error_range)
    max_y = min(vol.shape[-2], roi['kidney']['max_y'] + transform.roi_error_range)
    min_z = max(0, roi['kidney']['min_z'] - dataset.roi_error_range)
    max_z = min(roi['vol']['total_z'], roi['kidney']['max_z'] + dataset.roi_error_range)

    # Full-frame size the slices must be restored to.
    min_height = roi['vol']['total_y']
    min_width = roi['vol']['total_x']

    # Recompute the scale the forward transform used: the ROI was resized so
    # that its longer side equals the longer side of transform.output_size.
    roi_rows = max_y - min_y
    roi_cols = max_x - min_x
    max_size = max(transform.output_size[0], transform.output_size[1])
    scale = max_size / float(max(roi_cols, roi_rows))
    rows = int(roi_rows * scale)
    cols = int(roi_cols * scale)

    # Recompute the centering pad the forward transform added. Note
    # h_pad_bottom / w_pad_right are END indices of the unpadded region,
    # not padding widths.
    if rows < min_height:
        h_pad_top = int((min_height - rows) / 2.0)
        h_pad_bottom = rows + h_pad_top
    else:
        h_pad_top = 0
        h_pad_bottom = min_height

    if cols < min_width:
        w_pad_left = int((min_width - cols) / 2.0)
        w_pad_right = cols + w_pad_left
    else:
        w_pad_left = 0
        w_pad_right = min_width

    for i in range(len(vol)):
        img = vol[i]
        # 1) strip the centering padding
        reverse_padding_img = img[h_pad_top:h_pad_bottom, w_pad_left:w_pad_right]
        # 2) resize back to the ROI's original pixel size (uint8 for cv2,
        #    back to int64 afterwards to keep integer labels)
        reverse_padding_img = reverse_padding_img.astype(np.uint8)
        reverse_resize_img = cv2.resize(reverse_padding_img, dsize=(max_x - min_x, max_y - min_y),
                                        interpolation=cv2.INTER_LINEAR)
        reverse_resize_img = reverse_resize_img.astype(np.int64)
        # 3) paste the ROI back at its position in an empty full-size frame
        reverse_img = np.zeros((min_height, min_width))
        reverse_img[min_y:max_y, min_x: max_x] = reverse_resize_img
        vol[i] = reverse_img

    # 4) pad along z with empty slices so the volume spans the whole scan.
    size = (1, min_height, min_width)
    vol_min_z = [np.zeros(size) for _ in range(0, min_z)]
    vol_max_z = [np.zeros(size) for _ in range(max_z, roi['vol']['total_z'])]

    vol = vol_min_z + [vol] + vol_max_z
    vol = np.concatenate(vol, axis=0)

    assert vol.shape == (roi['vol']['total_z'], roi['vol']['total_y'], roi['vol']['total_x'])

    return vol

def evaluation(net, dataset, batch_size, num_workers, type, transform):
    """Run the network over one dataset split and print per-case metrics.

    Slices stream sequentially through the model and are buffered until a
    full case (one CT volume) is complete; then post-processing is applied
    and per-case dice/precision/recall are computed for the merged
    foreground (labels 1+2) and for label 2 alone.

    Args:
        net: CUDA model returning per-class logits of shape (B, C, H, W).
        dataset: supplies the subsets, per-case slice indices, num_classes.
        batch_size: slices per forward pass.
        num_workers: DataLoader worker processes.
        type: 'train' or 'valid' (NOTE: shadows the `type` builtin).
        transform: only needed by the commented-out reverse_transform step.
    """
    type = type.lower()
    # NOTE(review): any value other than 'train'/'valid' leaves `subset`
    # undefined and raises NameError below — consider an explicit error.
    if type == 'train':
        subset = dataset.train_dataset
        case_slice_indices = dataset.train_case_slice_indices
    elif type == 'valid':
        subset = dataset.valid_dataset
        case_slice_indices = dataset.valid_case_slice_indices

    # Sequential order is required: slices of one case must arrive contiguously.
    sampler = SequentialSampler(subset)
    data_loader = DataLoader(subset, batch_size=batch_size, sampler=sampler,
                             num_workers=num_workers, pin_memory=True)
    evaluator = Evaluator(dataset.num_classes)

    case = 0
    # Buffers accumulating batches until the current case is complete.
    vol_label = []
    vol_output = []
    vol_images = []
    # Maps tumor voxel count -> per-case metrics. NOTE(review): two cases
    # with the same label_case2.sum() overwrite each other here.
    result_map = {}

    dice1_all = []
    dice2_all = []

    with tqdm(total=len(case_slice_indices) - 1, ascii=True, desc=f'eval/{type:5}', dynamic_ncols=True) as pbar:
        for batch_idx, data in enumerate(data_loader):
            imgs, labels, idx = data['image'].cuda(), data['label'], data['index']

            outputs = net(imgs)
            outputs = outputs.argmax(dim=1)  # logits -> hard class labels

            labels = labels.cpu().detach().numpy()
            outputs = outputs.cpu().detach().numpy()
            imgs = imgs.cpu().detach().numpy()
            idx = idx.numpy()




            vol_label.append(labels)
            vol_output.append(outputs)
            vol_images.append(imgs)

            # A batch may complete one or more cases; flush each finished
            # case (its slices are the first vol_num_slice of the buffer).
            while case < len(case_slice_indices) - 1 and idx[-1] >= case_slice_indices[case + 1] - 1:
                vol_output = np.concatenate(vol_output, axis=0)
                vol_label = np.concatenate(vol_label, axis=0)
                vol_images = np.concatenate(vol_images, axis=0)

                vol_num_slice = case_slice_indices[case + 1] - case_slice_indices[case]
                evaluator.add(vol_output[:vol_num_slice], vol_label[:vol_num_slice])

                output_case = vol_output[:vol_num_slice]
                label_case = vol_label[:vol_num_slice]
                images_case = vol_images[:vol_num_slice]

                #roi = dataset.get_roi(case, type=type)
                #vol = output_case.copy()
                #vol = reverse_transform(vol, roi, dataset, transform)
                #vol = vol.astype(np.uint8)
                output_case = post_processing(output_case)
                #print(vol.shape, output_case.shape)

                #### Compute some per-case metrics

                # Merged foreground (labels 1 and 2 collapsed to one class).
                # Presumably label 1 = organ, label 2 = tumor — confirm
                # against the dataset's spec_classes.
                output_case1 = np.zeros(output_case.shape)
                label_case1 = np.zeros(label_case.shape)
                output_case1[output_case == 1] = 1
                output_case1[output_case == 2] = 1
                label_case1[label_case == 1] = 1
                label_case1[label_case == 2] = 1
                # NOTE(review): these divisions yield nan/inf (with a numpy
                # warning) when a class is absent from both volumes.
                dice1 = 2 * (output_case1*label_case1).sum() / (output_case1.sum() + label_case1.sum())
                precision1 = (output_case1[label_case1 == 1] == 1).sum() / (output_case1 == 1).sum()
                recall1 = (output_case1[label_case1 == 1] == 1).sum() / (label_case1 == 1).sum()

                # Label 2 alone (tumor/lesion class).
                output_case2 = np.zeros(output_case.shape)
                label_case2 = np.zeros(label_case.shape)
                output_case2[output_case == 2] = 1
                label_case2[label_case == 2] = 1
                dice2 = 2 * (output_case2 * label_case2).sum() / (output_case2.sum() + label_case2.sum())
                precision2 = (output_case2[label_case2 == 1] == 1).sum() / (output_case2 == 1).sum()
                recall2 = (output_case2[label_case2 == 1] == 1).sum() / (label_case2 == 1).sum()

                # case+35: presumably valid-split case IDs start at 35 —
                # TODO confirm against the dataset split.
                result_map[label_case2.sum()] = [case+35,dice1, dice2, precision1, recall1,
                                                 precision2, recall2]

                dice1_all.append(dice1)
                dice2_all.append(dice2)



                # Keep the leftover slices (start of the next case) buffered.
                vol_output = [vol_output[vol_num_slice:]]
                vol_label = [vol_label[vol_num_slice:]]
                vol_images = [vol_images[vol_num_slice:]]

                case += 1
                pbar.update(1)

    acc = evaluator.eval()


    # Print aggregate metrics (skip the per-case list entry).
    for k in sorted(list(acc.keys())):
        if k == 'dc_each_case': continue
        print(f'{type}/{k}: {acc[k]:.5f}')


    # Bucket per-case dice by tumor size (voxel count); the *_100w/_10w/_1w
    # suffixes follow the Chinese unit 万 (w = 10,000): >=1e6, >=1e5, >=1e4.
    tumor_dice_100w = []
    liver_dice_100w = []
    tumor_dice_10w = []
    liver_dice_10w = []
    tumor_dice_1w = []
    liver_dice_1w = []
    tumor_dice_small = []
    liver_dice_small = []


    for key in sorted(result_map.keys(), reverse=True):
        print(key, result_map[key])
        tumor_dice = result_map[key][2]
        liver_dice = result_map[key][1]
        if key >= 1000000:
            tumor_dice_100w.append(tumor_dice)
            liver_dice_100w.append(liver_dice)
        elif key >= 100000:
            tumor_dice_10w.append(tumor_dice)
            liver_dice_10w.append(liver_dice)
        elif key >= 10000:
            tumor_dice_1w.append(tumor_dice)
            liver_dice_1w.append(liver_dice)
        else:
            tumor_dice_small.append(tumor_dice)
            liver_dice_small.append(liver_dice)

    '''
    print(np.mean(np.array(tumor_dice_100w)), np.mean(np.array(liver_dice_100w)),
          np.mean(np.array(tumor_dice_10w)), np.mean(np.array(liver_dice_10w)),
          np.mean(np.array(tumor_dice_1w)), np.mean(np.array(liver_dice_1w)),
          np.mean(np.array(tumor_dice_small)), np.mean(np.array(liver_dice_small)))
    '''

    # Mean per-case dice over all cases (foreground, then label 2).
    print(np.mean(np.array(dice1_all)), np.mean(np.array(dice2_all)))
    # score = (acc['dc_per_case_1'] + acc["dc_per_case_2"]) / 2
    # return score


def main():
    """Load a trained UNet checkpoint and evaluate it on the validation split."""
    roi_margin = 15
    tf = MedicalTransform(output_size=(512, 512), roi_error_range=roi_margin, use_roi=True)
    ds = KiTS19("/datasets/3Dircadb/chengkung", stack_num=0, spec_classes=[0, 1, 2],
                img_size=(512, 512), use_roi=True, roi_file="roi.json",
                roi_error_range=5, train_transform=tf, valid_transform=tf)

    model = UNet(input_channel=ds.img_channels, n_class=ds.num_classes)
    checkpoint = Path("/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/unet/roi_1e_4/best.pth")
    cp.load_params({'net': model}, checkpoint, device='cpu')
    model = model.cuda()

    # Inference mode: frozen batch-norm/dropout, no autograd, eval-time transform.
    model.eval()
    torch.set_grad_enabled(False)
    tf.eval()

    evaluation(model, ds, 3, 1, type='valid', transform=tf)




if __name__ == '__main__':
    main()