# coding = utf-8

'''
Evaluation script: run a trained segmentation network over one split of the
dataset and report the per-case liver/tumor metrics used in the paper.
'''

import sys

import click
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib2 import Path
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm

import utils.checkpoint as cp
from LIST17.dataset import KiTS19
from dataset.transform import CropTransform
from loss import GeneralizedDiceLoss
from loss.util import class2one_hot
from network import MUNet
from utils.metrics import Evaluator
from utils import Metirc
from utils.vis import imshow
from post_processing import select_tumor,select_tumor_densecrf
import matplotlib.pyplot as plt
import os
import cv2
import SimpleITK as sitk

def _ensure_dir(path):
    """Create directory *path* (with parents) if it does not exist yet."""
    if not path.exists():
        path.mkdir(parents=True)


def _save_mask_slices(ref_case, liver_mask, tumor_mask, liver_dir, tumor_dir):
    """Save per-slice grayscale PNGs of the liver/tumor masks.

    For every slice of *ref_case* that contains any foreground (max >= 1)
    the corresponding *liver_mask* slice is written to *liver_dir*; slices
    that also contain tumor (max >= 2) additionally get their *tumor_mask*
    slice written to *tumor_dir*.
    """
    for i in range(ref_case.shape[0]):
        peak = np.max(ref_case[i])
        if peak == 0:
            continue
        if peak >= 1:
            plt.imsave(os.path.join(str(liver_dir), "{}.png".format(str(i).zfill(3))),
                       liver_mask[i], cmap="gray")
        if peak >= 2:
            plt.imsave(os.path.join(str(tumor_dir), "{}.png".format(str(i).zfill(3))),
                       tumor_mask[i], cmap="gray")


def _overlay_contours(image, label, classes, color):
    """Draw the contours of ``label in classes`` onto *image*, in place.

    A binary mask is built from the given class ids, its outer contours are
    extracted with OpenCV and drawn as closed 1-px polylines in *color*.
    NOTE(review): the 2-tuple unpack assumes OpenCV >= 4.x (3.x returns 3
    values from findContours) — confirm the deployed cv2 version.
    """
    mask = np.zeros(label.shape)
    for cls in classes:
        mask[label == cls] = 1
    mask = (mask * 255).astype(np.uint8)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        points = [contour[t][0] for t in range(contour.shape[0])]
        cv2.polylines(image, np.array([points], np.int32), True, color, thickness=1)


def _save_fusion_slices(images_case, label_stack, dest):
    """Save the center image channel with liver (green) and tumor (red)
    contours from *label_stack* overlaid, one PNG per non-empty slice."""
    for i in range(images_case.shape[0]):
        # images_case[i] is a slice stack; channel 1 is the center slice.
        # Assumes intensities are normalised to [0, 1] — TODO confirm.
        image = images_case[i][1] * 255
        image = image.astype(np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        label = label_stack[i]

        if np.max(label) == 0:
            continue

        if 1 in np.unique(label):
            # Liver outline covers both class 1 and class 2 regions.
            _overlay_contours(image, label, (1, 2), [0, 255, 0])
        if 2 in np.unique(label):
            _overlay_contours(image, label, (2,), [255, 0, 0])

        plt.imsave(os.path.join(str(dest), "{}.png".format(str(i).zfill(3))), image)


def draw(case_id, output_case, output_case1, output_case2, label_case, label_case1, label_case2, images_case):
    """Dump per-case visualisations under a fixed output root.

    Writes, for case *case_id*:
      - ``predict_liver`` / ``predict_tumor``: per-slice predicted masks,
      - ``label_liver`` / ``label_tumor``: per-slice ground-truth masks,
      - ``fusion``: input slices with ground-truth contours overlaid,
      - ``fusion_predict``: input slices with predicted contours overlaid.

    *output_case* / *label_case* are the multi-class volumes; the ``1`` and
    ``2`` suffixed arguments are the precomputed binary liver/tumor masks.
    """
    origin_path = Path(
        "/media/dl/Lancer/predict/gabor_class_instance8")
    case_path = origin_path / "case_{}".format(str(case_id).zfill(5))
    label_liver_path = case_path / "label_liver"
    label_tumor_path = case_path / "label_tumor"
    predict_liver_path = case_path / "predict_liver"
    predict_tumor_path = case_path / "predict_tumor"
    fusion_path = case_path / "fusion"
    predict_fusion_path = case_path / "fusion_predict"

    for directory in (label_liver_path, label_tumor_path, predict_liver_path,
                      predict_tumor_path, fusion_path, predict_fusion_path):
        _ensure_dir(directory)

    _save_mask_slices(output_case, output_case1, output_case2,
                      predict_liver_path, predict_tumor_path)
    _save_mask_slices(label_case, label_case1, label_case2,
                      label_liver_path, label_tumor_path)

    _save_fusion_slices(images_case, label_case, fusion_path)
    _save_fusion_slices(images_case, output_case, predict_fusion_path)

def save_numpy(case_id, output_case, prob_case,
               root="/media/dl/Lancer/predict/gabor_class_instance8"):
    """Persist per-slice prediction and probability arrays for one case.

    Writes ``root/case_XXXXX/predict_numpy/NNN.npy`` (hard class labels)
    and ``root/case_XXXXX/prob_numpy/NNN.npy`` (tumor probabilities), one
    file per slice.

    Args:
        case_id: numeric case identifier, zero-padded to 5 digits.
        output_case: (slices, H, W) array of predicted class labels.
        prob_case: (slices, H, W) array of tumor-class probabilities.
        root: output root directory (default keeps the original path, so
            existing callers are unaffected).
    """
    case_path = Path(root) / "case_{}".format(str(case_id).zfill(5))
    predict_numpy = case_path / "predict_numpy"
    prob_numpy = case_path / "prob_numpy"

    if not predict_numpy.exists():
        predict_numpy.mkdir(parents=True)
    if not prob_numpy.exists():
        prob_numpy.mkdir(parents=True)

    # BUGFIX: filenames used the ".png" suffix, but np.save appends ".npy",
    # producing misleading "NNN.png.npy" files; use ".npy" directly.
    for i in range(prob_case.shape[0]):
        np.save(os.path.join(str(prob_numpy), "{}.npy".format(str(i).zfill(3))), prob_case[i])

    for i in range(output_case.shape[0]):
        np.save(os.path.join(str(predict_numpy), "{}.npy".format(str(i).zfill(3))), output_case[i])

def get_voxel_spacing(case_id, data_path="/datasets/qiye/DongBeiDaXue2/image_venous",
                      id_offset=50):
    """Return the voxel spacing of a case's CT volume as a numpy array.

    The volumes in *data_path* are indexed by sorted filename order; case
    ids in this split start at *id_offset*, so file index = case_id - offset.

    Args:
        case_id: numeric case identifier.
        data_path: directory holding the image volumes (default keeps the
            original hard-coded path for backward compatibility).
        id_offset: first case id of the split (default 50, as before).

    Returns:
        np.ndarray with the spacing returned by SimpleITK's GetSpacing()
        (x, y, z order per the SimpleITK convention).
    """
    file_list = sorted(os.listdir(data_path))
    file_name = os.path.join(data_path, file_list[case_id - id_offset])
    data = sitk.ReadImage(file_name)
    return np.array(data.GetSpacing())


def evaluation(net, dataset, batch_size, num_workers, type):
    """Evaluate *net* slice-by-slice on one split of *dataset* and print
    overall plus per-case liver/tumor segmentation metrics.

    Slices arrive in sequential order from the loader and are buffered
    until a whole case (CT volume) has been collected; per-case Dice,
    precision, recall, VOE and RVD are then computed and accumulated.

    Args:
        net: segmentation network; called as ``net(imgs)`` and expected to
            return per-class logits of shape (B, C, H, W), already on GPU.
        dataset: KiTS19-style dataset exposing ``train_dataset`` /
            ``valid_dataset``, the matching ``*_case_slice_indices``
            (cumulative slice offsets, one entry per case boundary) and
            ``case_idx_to_case_id``.
        batch_size: number of slices per forward pass.
        num_workers: DataLoader worker processes.
        type: split to evaluate, ``'train'`` or ``'valid'``
            (case-insensitive).  NOTE: shadows the builtin ``type``; the
            name is kept so existing keyword callers keep working.

    Raises:
        ValueError: if *type* names an unknown split.
    """
    type = type.lower()
    if type == 'train':
        subset = dataset.train_dataset
        case_slice_indices = dataset.train_case_slice_indices
    elif type == 'valid':
        subset = dataset.valid_dataset
        case_slice_indices = dataset.valid_case_slice_indices
    else:
        # Fail fast: previously an unknown split fell through to a NameError.
        raise ValueError("type must be 'train' or 'valid', got {!r}".format(type))

    sampler = SequentialSampler(subset)
    data_loader = DataLoader(subset, batch_size=batch_size, sampler=sampler,
                             num_workers=num_workers, pin_memory=True)
    evaluator = Evaluator(4)

    case = 0           # index of the case currently being assembled
    vol_label = []     # buffered label slices for the current case
    vol_output = []    # buffered prediction slices
    vol_images = []    # buffered input image stacks
    vol_prob = []      # buffered tumor-class probability slices
    result_map = {}    # tumor voxel count -> [case_id, dice1, dice2, p1, r1, p2, r2]

    dice1_all = []       # liver dice, one entry per case
    dice2_all = []       # tumor dice
    recall1_all = []
    precision1_all = []
    recall2_all = []
    precision2_all = []
    voe_all = []         # volumetric overlap error (liver)
    rvd_all = []         # relative volume difference (liver)
    assd_all = []        # kept for the (currently disabled) surface metrics
    rmsd_all = []

    with tqdm(total=len(case_slice_indices) - 1, ascii=True, desc=f'eval/{type:5}', dynamic_ncols=True) as pbar:
        for batch_idx, data in enumerate(data_loader):
            imgs, labels, idx = data['image'].cuda(), data['label'].cuda(), data['index']

            outputs = net(imgs)
            #outputs = outputs["output"]
            outputs = F.softmax(outputs, dim=1)
            prob = outputs[:, 2, :, :]          # tumor-class probability map
            outputs = outputs.argmax(dim=1)     # hard per-pixel class labels

            labels = labels.cpu().detach().numpy()
            outputs = outputs.cpu().detach().numpy()
            imgs = imgs.cpu().detach().numpy()
            prob = prob.cpu().detach().numpy()
            idx = idx.numpy()

            vol_label.append(labels)
            vol_output.append(outputs)
            vol_images.append(imgs)
            vol_prob.append(prob)

            # One batch may complete one or more cases; flush each finished one.
            while case < len(case_slice_indices) - 1 and idx[-1] >= case_slice_indices[case + 1] - 1:
                vol_output = np.concatenate(vol_output, axis=0)
                vol_label = np.concatenate(vol_label, axis=0)
                vol_images = np.concatenate(vol_images, axis=0)
                vol_prob = np.concatenate(vol_prob, axis=0)

                vol_num_slice = case_slice_indices[case + 1] - case_slice_indices[case]
                evaluator.add(vol_output[:vol_num_slice], vol_label[:vol_num_slice])

                output_case = vol_output[:vol_num_slice]
                label_case = vol_label[:vol_num_slice]
                images_case = vol_images[:vol_num_slice]
                prob_case = vol_prob[:vol_num_slice]
                output_case = select_tumor(output_case, label_case, prob_case)

                # Per-case metrics: classes 1/2/3 all count as "liver",
                # class 2 alone counts as "tumor".
                case_id = dataset.case_idx_to_case_id(case, type=type)
                output_case1 = np.zeros(output_case.shape)
                label_case1 = np.zeros(label_case.shape)
                output_case1[output_case == 1] = 1
                output_case1[output_case == 2] = 1
                output_case1[output_case == 3] = 1
                label_case1[label_case == 1] = 1
                label_case1[label_case == 2] = 1
                # BUGFIX: this line previously wrote into label_case (typo),
                # which dropped class-3 voxels from the liver ground-truth
                # mask and mutated the raw label volume as a side effect.
                label_case1[label_case == 3] = 1
                dice1 = 2 * (output_case1 * label_case1).sum() / (output_case1.sum() + label_case1.sum())
                precision1 = (output_case1[label_case1 == 1] == 1).sum() / (output_case1 == 1).sum()
                recall1 = (output_case1[label_case1 == 1] == 1).sum() / (label_case1 == 1).sum()

                voe = 1 - ((output_case1 * label_case1).sum() / (output_case1.sum() + label_case1.sum() - (output_case1 * label_case1).sum()))
                rvd = (output_case1.sum() / label_case1.sum()) - 1

                label_case1 = label_case1.astype(np.uint8)
                output_case1 = output_case1.astype(np.uint8)
                #spacing = get_voxel_spacing(case_id)
                #metrix = Metirc(real_mask=label_case1, pred_mask=output_case1, voxel_spacing=spacing)
                ##assd_all.append(metrix.get_ASSD())
                #rmsd_all.append(metrix.get_RMSD())

                output_case2 = np.zeros(output_case.shape)
                label_case2 = np.zeros(label_case.shape)
                output_case2[output_case == 2] = 1
                label_case2[label_case == 2] = 1
                dice2 = 2 * (output_case2 * label_case2).sum() / (output_case2.sum() + label_case2.sum())
                # Epsilon guards against an empty tumor prediction; note the
                # other denominators above have no such guard and can divide
                # by zero when a case has no liver/tumor voxels at all.
                precision2 = (output_case2[label_case2 == 1] == 1).sum() / ((output_case2 == 1).sum() + 0.00001)
                recall2 = (output_case2[label_case2 == 1] == 1).sum() / (label_case2 == 1).sum()

                result_map[label_case2.sum()] = [case_id, dice1, dice2, precision1, recall1,
                                                 precision2, recall2]

                dice1_all.append(dice1)
                dice2_all.append(dice2)

                recall1_all.append(recall1)
                recall2_all.append(recall2)

                precision1_all.append(precision1)
                precision2_all.append(precision2)

                voe_all.append(voe)
                rvd_all.append(rvd)

                # Visualisation / raw-array dumps (disabled by default).
                #draw(case_id=case_id, output_case=output_case, output_case1=output_case1, output_case2=output_case2,
                #     label_case=label_case, label_case1=label_case1, label_case2=label_case2, images_case=images_case)

                #save_numpy(case_id=case_id, output_case=output_case, prob_case=prob_case)

                # Keep any buffered slices that already belong to the next case.
                vol_output = [vol_output[vol_num_slice:]]
                vol_label = [vol_label[vol_num_slice:]]
                vol_images = [vol_images[vol_num_slice:]]
                vol_prob = [vol_prob[vol_num_slice:]]

                case += 1
                pbar.update(1)

    acc = evaluator.eval()

    for k in sorted(list(acc.keys())):
        if k == 'dc_each_case': continue
        print(f'{type}/{k}: {acc[k]:.5f}')

    # Bin per-case dice by ground-truth tumor size (voxel count).  The bins
    # are accumulated for inspection; only the printout below is consumed.
    tumor_dice_100w = []
    liver_dice_100w = []
    tumor_dice_10w = []
    liver_dice_10w = []
    tumor_dice_1w = []
    liver_dice_1w = []
    tumor_dice_small = []
    liver_dice_small = []

    # Largest tumors first: case_id, tumor voxels, tumor dice, recall2, precision2.
    for key in sorted(result_map.keys(), reverse=True):
        print(result_map[key][0], key, result_map[key][2], result_map[key][6], result_map[key][5])
        tumor_dice = result_map[key][2]
        liver_dice = result_map[key][1]
        if key >= 1000000:
            tumor_dice_100w.append(tumor_dice)
            liver_dice_100w.append(liver_dice)
        elif key >= 100000:
            tumor_dice_10w.append(tumor_dice)
            liver_dice_10w.append(liver_dice)
        elif key >= 10000:
            tumor_dice_1w.append(tumor_dice)
            liver_dice_1w.append(liver_dice)
        else:
            tumor_dice_small.append(tumor_dice)
            liver_dice_small.append(liver_dice)

    # Mean tumor dice, liver VOE and liver RVD over all evaluated cases.
    print(np.mean(np.array(dice2_all)), np.mean(np.array(voe_all)), np.mean(np.array(rvd_all)))
    #print(type, "loss:", np.mean(np.array(loss_list)))
    # score = (acc['dc_per_case_1'] + acc["dc_per_case_2"]) / 2
    # return score


def validate_evaluation():
    """Build the dataset and MUNet model, restore the trained checkpoint,
    and run the evaluation pass on the validation split."""
    dataset = KiTS19(
        "/datasets/3DIRCADB/chengkun",
        stack_num=3,
        img_size=(512, 512),
        use_roi=False,
        roi_file=None,
        roi_error_range=5,
        train_transform=None,
        valid_transform=None,
    )

    net = MUNet(input_channel=dataset.img_channels, n_class=dataset.num_classes)

    # Restore parameters on CPU first, then move the model to GPU.
    checkpoint = Path("/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/munet/model/munet_liver.pth")
    cp.load_params({'net': net}, checkpoint, device='cpu')

    net = net.cuda()
    net.eval()

    # Switch type to "train" (with batch_size=1) to score the training split.
    evaluation(net, dataset, batch_size=3, num_workers=1, type="valid")





if __name__ == '__main__':
    # Script entry point: evaluate the hard-coded checkpoint on the
    # validation split (paths are configured inside validate_evaluation).
    validate_evaluation()