import glob
import os
import pickle

import nibabel
import numpy
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm

from config.Parse2022_config import ModifiedUNet3DConfigure
from models.EvaluationMetrics import *
from models.Modified3DUNet import Modified3DUNet
from models.UNETR import UNETR
from utils.DataUtils import read_nii_file_with_mask, preprocess
from utils.LogUtil import my_logger
from utils.PostProcessing import closing, max_connected_domain
from utils.ThresholdSegmentation import myAdaptiveThreshold


# from utils.DataUtils import generate_test_data, generate_test_data_UNetTransformer


def load_pickle(file: str, mode: str = 'rb'):
    """Deserialize and return the object stored in a pickle file.

    :param file: path of the pickle file to read
    :param mode: file-open mode (binary read by default)
    :return: the unpickled Python object
    """
    # NOTE: pickle.load must only be used on trusted files.
    with open(file, mode) as fp:
        return pickle.load(fp)


def use_nnUNet_directly():
    """Load a pretrained nnUNet checkpoint directly from disk.

    Exploratory helper with hard-coded local paths: reads the training-plans
    pickle and the serialized model of the nnUNet Task008_HepaticVessel
    fold-0 checkpoint, then prints a marker to signal that loading succeeded.
    """
    plans_pickle_path = r"H:\laboratory\project\blood_vessel\nnUNet-master\pretrained_model\Task008_HepaticVessel\3d_fullres\Task008_HepaticVessel\nnUNetTrainerV2__nnUNetPlansv2.1\fold_0\model_final_checkpoint.model.pkl"
    checkpoint_info = load_pickle(plans_pickle_path)
    checkpoint_path = r"H:\laboratory\project\blood_vessel\nnUNet-master\pretrained_model\Task008_HepaticVessel\3d_fullres\Task008_HepaticVessel\nnUNetTrainerV2__nnUNetPlansv2.1\fold_0\model_final_checkpoint.model"
    loaded_model = torch.load(checkpoint_path)
    print("good")


def predict_3d_unet():
    """Run Modified3DUNet on one DICOM test sample and save slice-wise results.

    Loads a trained checkpoint, pushes the whole preprocessed volume through
    the network in a single forward pass, rescales the sigmoid output back to
    the raw pixel-value span and writes one DICOM file per slice into a
    ``predict_result_5`` sub-directory of the test sample folder.

    NOTE(review): ``generate_test_data`` is not in scope — its import at the
    top of the file is commented out, so calling this function as-is raises
    NameError.  Restore the import before use.
    """
    import os
    # Work around the Windows "duplicate OpenMP runtime" abort when numpy and
    # torch both ship libiomp.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
    device = torch.device('cuda')
    # model = UNet(input_size=want_width, conv_kernel_size=3, conv_stride=1)
    model = Modified3DUNet(1, 1)
    # pkl_file_path = r"H:\laboratory\project\blood_vessel\nnUNet-master\pretrained_model\Task008_HepaticVessel\3d_fullres\Task008_HepaticVessel\nnUNetTrainerV2__nnUNetPlansv2.1\fold_0\model_final_checkpoint.model.pkl"
    # info = load_pickle(pkl_file_path)
    # model_file_path = r"H:\laboratory\project\blood_vessel\nnUNet-master\pretrained_model\Task008_HepaticVessel\3d_fullres\Task008_HepaticVessel\nnUNetTrainerV2__nnUNetPlansv2.1\fold_0\model_final_checkpoint.model"
    # model = torch.load(model_file_path)
    # model.eval()
    # model = model.to(device)
    weights_path = r"G:\Python_Project\own-unet-pytorch\weights\_3d_unet_15samples\epoch_1916"
    check_point = torch.load(weights_path)
    model.load_state_dict(check_point)
    model = model.to(device)

    # test_img_path = r"D:\gs\data_set\DRIVE\test\images\10_test.tif"
    test_img_path = r"D:\mimics_data\test_data\sample2"
    # Presumably returns the cropped volume plus the pydicom datasets, crop
    # indices and the pixel-value range used for normalisation — confirm
    # against utils.DataUtils once the import is restored.
    test_img, dicom_object_list, index_list, pixel_min, pixel_max = generate_test_data(test_img_path)
    test_img = torch.from_numpy(test_img)
    # Add batch and channel dimensions: (D, H, W) -> (1, 1, D, H, W).
    test_img = test_img.unsqueeze(0)
    test_img = test_img.unsqueeze(0)

    test_img = test_img.to(device, dtype=torch.float32)
    with torch.no_grad():
        # Index [1] selects the second element of the model's output tuple —
        # assumed to be the pre-activation logits; confirm against
        # Modified3DUNet.forward.
        predict = model(test_img)[1]
        predict = nn.Sigmoid()(predict)
        predict = predict.cpu()
        # Rescale the [0, 1] probabilities to the original pixel-value span so
        # the result is visible when written back into DICOM.
        debug_predict = predict.numpy() * (pixel_max - pixel_min)
        # predict = predict.argmax(dim=0)
        # predict = F.one_hot(predict, 2).permute(2, 0, 1)
        # predict = softmax(predict, dim=0)
        # predict = predict.cpu().numpy()
    # for img_array in debug_predict[0][0]:
    #     pyplot.imshow(img_array)
    #     pyplot.show()
    target_path = os.path.join(test_img_path, "predict_result_5")
    if not os.path.exists(target_path):
        os.mkdir(target_path)
    for i, dicom_object in enumerate(dicom_object_list):
        # Paste the predicted crop back into a full-size, zeroed slice.
        new_pixel_array = numpy.zeros_like(dicom_object.pixel_array)
        new_pixel_array[index_list[2]:index_list[3], index_list[4]:index_list[5]] = debug_predict[0][0][i, :, :]

        # (0028,0100) is the DICOM BitsAllocated tag: match the stored dtype.
        if dicom_object[0x0028, 0x0100].value == 16:  # the DICOM pixel matrix is 16-bit
            new_pixel_array = new_pixel_array.astype(numpy.uint16)  # new_pixel_array is the image matrix, dicom_object the dataset
        elif dicom_object[0x0028, 0x0100].value == 8:
            new_pixel_array = new_pixel_array.astype(numpy.uint8)

        dicom_object.PixelData = new_pixel_array.tobytes()

        dicom_object.pixel_array[:, :] = new_pixel_array[:, :]

        dicom_object.save_as(os.path.join(target_path, str(i) + ".dcm"))


def predict_UNet_Transformer(configure):
    """Run UNETR on one DICOM test sample and save slice-wise DICOM results.

    The sample is split into fixed-size crops, each crop is predicted
    independently, and the sigmoid outputs (rescaled to each crop's original
    pixel-value range) are pasted back into full-size slices written to a
    ``predict_result_6`` sub-directory of the test sample folder.

    NOTE(review): ``generate_test_data_UNetTransformer`` is not in scope —
    its import at the top of the file is commented out, so calling this
    function as-is raises NameError.  Restore the import before use.

    :param configure: UNETR configure providing ``in_channel_num``,
        ``out_channel_num`` and ``train_data_size``
    """
    import os
    # Work around the Windows "duplicate OpenMP runtime" abort.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
    device = torch.device('cuda')
    # model = UNet(input_size=want_width, conv_kernel_size=3, conv_stride=1)
    model = UNETR(
        in_channels=configure.in_channel_num,
        out_channels=configure.out_channel_num,
        img_size=configure.train_data_size
    ).to(device)

    weights_path = r"G:\laboratory\project\blood_vessel\own-unet-pytorch\weights\UNETR\epoch_772"
    check_point = torch.load(weights_path)
    model.load_state_dict(check_point)
    model = model.to(device)

    # test_img_path = r"D:\gs\data_set\DRIVE\test\images\10_test.tif"
    test_img_path = r"D:\mimics_data\test_data\sample2"
    cropped_data_list, pixel_value_boundary_list, dicom_object_list, index_list = generate_test_data_UNetTransformer(
        test_img_path,
        configure.train_data_size)

    predict_result_list = []
    with torch.no_grad():
        for i, cropped_data in enumerate(cropped_data_list):
            test_img = torch.from_numpy(cropped_data)
            # Add batch and channel dimensions: (D, H, W) -> (1, 1, D, H, W).
            test_img = test_img.unsqueeze(0)
            test_img = test_img.unsqueeze(0)
            test_img = test_img.to(device, dtype=torch.float32)
            predict = model(test_img)
            predict = nn.Sigmoid()(predict)
            predict = predict.cpu()
            # Rescale probabilities to this crop's original pixel-value span.
            debug_predict = predict.numpy() * (pixel_value_boundary_list[i][1] - pixel_value_boundary_list[i][0])
            predict_result_list.append(debug_predict)
        # predict = predict.argmax(dim=0)
        # predict = F.one_hot(predict, 2).permute(2, 0, 1)
        # predict = softmax(predict, dim=0)
        # predict = predict.cpu().numpy()
    # for img_array in debug_predict[0][0]:
    #     pyplot.imshow(img_array)
    #     pyplot.show()
    target_path = os.path.join(test_img_path, "predict_result_6")
    if not os.path.exists(target_path):
        os.mkdir(target_path)

    for i, dicom_object in enumerate(dicom_object_list):
        new_pixel_array = numpy.zeros_like(dicom_object.pixel_array)
        for j, predict_result in enumerate(predict_result_list):
            # NOTE(review): the left bound below is always 0 because the
            # expression subtracts a term from itself; this looks like a typo
            # (perhaps `index_list[j][0] <= i` with `i - index_list[j][0]`
            # as the slice index was intended) — confirm before relying on it.
            if index_list[j][0] - index_list[j][0] <= i < index_list[j][1] - index_list[j][0]:
                new_pixel_array[index_list[j][2]:index_list[j][3], index_list[j][4]:index_list[j][5]] = \
                    predict_result[0][0][i, :, :]

        # (0028,0100) is the DICOM BitsAllocated tag: match the stored dtype.
        if dicom_object[0x0028, 0x0100].value == 16:  # the DICOM pixel matrix is 16-bit
            new_pixel_array = new_pixel_array.astype(numpy.uint16)  # new_pixel_array is the image matrix, dicom_object the dataset
        elif dicom_object[0x0028, 0x0100].value == 8:
            new_pixel_array = new_pixel_array.astype(numpy.uint8)

        dicom_object.PixelData = new_pixel_array.tobytes()

        dicom_object.pixel_array[:, :] = new_pixel_array[:, :]

        dicom_object.save_as(os.path.join(target_path, str(i) + ".dcm"))


def model_predict(configure):
    """Run patch-wise inference on one labelled sample and report Dice scores.

    The labelled bounding box is tiled into patches (the last patch along
    each axis is anchored at the box end, overlapping its neighbour), each
    patch is run through ``configure.model``, and the stitched probability
    map is thresholded (fixed and adaptive), post-processed (largest
    connected component, morphological closing, and both combined) and saved
    as NIfTI files plus intermediate ``.npy`` dumps.  Per-patch and
    whole-volume Dice values are written to the log.

    :param configure: prediction configure providing ``model``,
        ``weights_path``, ``patch_size``, ``threshold``, ``class_num`` and
        ``test_img_path``
    :return: None — results go to the log and to ``predict_root_path``
    """
    # Hausdorff_Distance_3D
    Threshold = configure.threshold
    patch_size = configure.patch_size
    # Work around the Windows "duplicate OpenMP runtime" abort.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
    device = torch.device('cuda')
    model = configure.model
    weights_path = configure.weights_path
    check_point = torch.load(weights_path)
    model.load_state_dict(check_point)
    model = model.to(device)

    sample_name = os.path.basename(configure.test_img_path)
    # Per the original author's note, Box_boundary coordinates are open at
    # the start and closed at the end — TODO confirm against
    # read_nii_file_with_mask.
    train_img, label, train_I_affine, label_I_affine, Box_boundary = read_nii_file_with_mask(configure.test_img_path, )
    train_img = preprocess(train_img)

    patch_index_list = []
    original_shape = train_img.shape
    # Number of whole patches that fit along each axis of the bounding box.
    z_num = (Box_boundary[1] - Box_boundary[0]) // patch_size[0]
    x_num = (Box_boundary[3] - Box_boundary[2]) // patch_size[1]
    y_num = (Box_boundary[5] - Box_boundary[4]) // patch_size[2]
    assert z_num > 0
    assert x_num > 0
    assert y_num > 0
    for i in range(z_num + 1):
        z_start = i * patch_size[0] + Box_boundary[0]
        z_end = (i + 1) * patch_size[0] + Box_boundary[0]
        if z_end > Box_boundary[1]:  # the last one: anchor at the box end (overlaps its neighbour)
            z_end = Box_boundary[1]
            z_start = Box_boundary[1] - patch_size[0]
        for j in range(x_num + 1):
            x_start = j * patch_size[1] + Box_boundary[2]
            x_end = (j + 1) * patch_size[1] + Box_boundary[2]
            if x_end > Box_boundary[3]:  # the last one
                x_end = Box_boundary[3]
                x_start = Box_boundary[3] - patch_size[1]
            for k in range(y_num + 1):
                y_start = k * patch_size[2] + Box_boundary[4]
                y_end = (k + 1) * patch_size[2] + Box_boundary[4]
                if y_end > Box_boundary[5]:  # the last one
                    y_end = Box_boundary[5]
                    y_start = Box_boundary[5] - patch_size[2]
                patch_index_list.append(
                    [z_start, z_end, x_start, x_end, y_start, y_end])

    # predict
    # Output directory depends on which workstation this runs on.
    if os.environ.get("COMPUTERNAME") == "DESKTOP-16QO4BT":
        predict_root_path = r"H:\laboratory\project\blood_vessel\MICCAI2022-Draft\predict_result"
    else:
        predict_root_path = r"C:\gs\code\parse2022-draft\predict_result"

    predict_file_path = os.path.join(predict_root_path,
                                     os.path.basename(weights_path) + "_" + str(
                                         Threshold) + "_" + sample_name + "_" + "_predict_result.nii.gz")
    predict_file2_path = os.path.join(predict_root_path,
                                      os.path.basename(weights_path) + "_" + str(
                                          Threshold) + "_" + sample_name + "_" + "_predict_result2.nii.gz")
    post_proc_file_path = os.path.join(predict_root_path,
                                       os.path.basename(weights_path) + "_" + str(
                                           Threshold) + "_" + sample_name + "_" + "_post_proc_result.nii.gz")
    predict_result = numpy.zeros_like(train_img)
    predict_result_no_sigmoid = numpy.zeros_like(train_img)

    with torch.no_grad():
        for patch_index in patch_index_list:
            patch = train_img[patch_index[0]:patch_index[1], patch_index[2]:patch_index[3],
                    patch_index[4]:patch_index[5]]
            patch_label = label[patch_index[0]:patch_index[1], patch_index[2]:patch_index[3],
                          patch_index[4]:patch_index[5]]
            patch = torch.from_numpy(patch).type(torch.float32).to(device)
            patch_label = torch.from_numpy(patch_label).long().to(device)
            # Add batch and channel dimensions: (D, H, W) -> (1, 1, D, H, W).
            patch = patch.unsqueeze(0)

            patch = patch.unsqueeze(0)
            patch_label = patch_label.unsqueeze(0)
            patch_label = patch_label.unsqueeze(0)

            # patch_label_debug = torch.cat([patch_label, patch_label, patch_label, patch_label], dim=0)

            # input_tensor = torch.zeros([1, 2, 128, 128, 64]).to(device)
            # tmp_accu_1 = dice_coefficient_3D(input_tensor, patch_label, configure.class_num)
            # tmp_accu_2 = dice_coefficient_3D(patch_label_debug, patch_label_debug, configure.class_num)

            # The model is assumed to return (activated output, raw logits) —
            # confirm against the configured model's forward().
            predict, predict_no_sigmoid = model(patch)

            # total_HD, avg_HD = Hausdorff_Distance_3D(predict, patch_label)
            dice = dice_coefficient_3D(predict, patch_label, configure.class_num).item()
            # my_logger.info("The patch total HD is " + str(total_HD))
            # my_logger.info("The patch average HD is " + str(avg_HD))
            my_logger.info("The patch dice is " + str(dice))
            # predict = predict[1]
            # Stitch this patch back into the full-volume prediction maps.
            predict_result[patch_index[0]:patch_index[1], patch_index[2]:patch_index[3],
            patch_index[4]:patch_index[5]] = predict[0][0].cpu().numpy()
            predict_result_no_sigmoid[patch_index[0]:patch_index[1], patch_index[2]:patch_index[3],
            patch_index[4]:patch_index[5]] = predict_no_sigmoid[0][0].cpu().numpy()
        # Binarise with the fixed threshold and, separately, an adaptive one.
        predict_result_threshold = numpy.where(predict_result > Threshold, 1, 0)
        predict_result_ad_threshold = myAdaptiveThreshold(predict_result)
        # tensor_predict_result = torch.from_numpy(predict_result_threshold).unsqueeze(0).unsqueeze(0)
        # tensor_label = torch.from_numpy(label).unsqueeze(0).unsqueeze(0)
        whole_dice = dice_coefficient_3D(predict_result_threshold, label, configure.class_num).item()
        # whole_HD = Hausdorff_Distance_3D(predict_result_threshold, label)
        my_logger.info("The whole dice is " + str(whole_dice))
        ad_whole_dice = dice_coefficient_3D(predict_result_ad_threshold, label, configure.class_num).item()
        my_logger.info("After adaptive threshold, the whole dice is " + str(ad_whole_dice))
        # my_logger.info("The whole HD is " + str(whole_HD))

        # post_processing_result = closing(predict_result_threshold)
        # Dump intermediates for offline inspection (written to the CWD).
        np.save("train_img", train_img)
        np.save("predict_result_threshold", predict_result_threshold)

        # post_processing_result = predict_result_threshold
        # post_processing_result = dense_crf(train_img, predict_result_threshold)
        # np.save("crf_result", post_processing_result)

        max_connected_result = max_connected_domain(predict_result_threshold)
        np.save("max_connected_domain", max_connected_result)

        closing_result = closing(predict_result_threshold)
        np.save("closing_result", closing_result)

        # post_processing_result = torch.from_numpy(post_processing_result).unsqueeze(0).unsqueeze(0)
        whole_dice = dice_coefficient_3D(max_connected_result, label, configure.class_num).item()
        # whole_HD = Hausdorff_Distance_3D(max_connected_result, label)
        my_logger.info("After maximum connection operation, the whole dice is " + str(whole_dice))
        # my_logger.info("After maximum connection operation, the whole HD is " + str(whole_HD))

        whole_dice = dice_coefficient_3D(closing_result, label, configure.class_num).item()
        # whole_HD = Hausdorff_Distance_3D(closing_result, label)
        my_logger.info("After closing operation, the whole dice is " + str(whole_dice))
        # my_logger.info("After closing operation, the whole HD is " + str(whole_HD))

        # Combined post-processing: closing followed by largest component.
        max_closing_result = max_connected_domain(closing_result)
        np.save("max_closing_result", max_closing_result)
        whole_dice = dice_coefficient_3D(max_closing_result, label, configure.class_num).item()
        # whole_HD = Hausdorff_Distance_3D(max_closing_result, label)
        my_logger.info("After 2 operation, the whole dice is " + str(whole_dice))
        # my_logger.info("After 2 operation, the whole HD is " + str(whole_HD))

        nibabel.Nifti1Image(predict_result_threshold, label_I_affine).to_filename(predict_file_path)
        nibabel.Nifti1Image(max_closing_result, label_I_affine).to_filename(post_proc_file_path)
        nibabel.Nifti1Image(predict_result_no_sigmoid, label_I_affine).to_filename(predict_file2_path)
    print("good")


def do_evaluation(eval_configure, model_configure_list):
    """
    Predict the evaluation data (which has no labels) and save NIfTI results.

    Every sample under ``eval_configure.valid_data_root_path`` is read and
    preprocessed, then tiled into overlapping patches (stride = half the
    model patch size).  Each patch is expanded with reflected context to the
    full network input size, predicted, cropped back to its central region,
    stitched into a full-volume map, thresholded, and written to
    ``<sample>/valid/<sample>.nii.gz``.

    BUGFIX: the ``torch.no_grad()`` inference block used to be nested inside
    the sample-loading loop, so after loading sample k it re-predicted
    samples 0..k — quadratic work with identical final outputs.  It now runs
    exactly once, after all samples are loaded.

    :param eval_configure: evaluation-wide settings (``valid_data_root_path``,
        ``if_overlap``, ...)
    :param model_configure_list: one configure per model, each providing
        ``model``, ``weights_path``, ``patch_size`` and ``threshold``
    :return: None — predictions are written to disk
    """
    device = torch.device('cuda')

    sample_list = glob.glob(os.path.join(eval_configure.valid_data_root_path, "*"))
    eval_array_list = []
    eval_box_list = []
    eval_affine_list = []
    # Per the original author's note, Box_boundary coordinates are open at
    # the start and closed at the end — TODO confirm against
    # read_nii_file_with_mask.
    for sample_path in sample_list:
        eval_array, eval_array_affine, Box_boundary = read_nii_file_with_mask(sample_path, if_label=False)
        eval_array = preprocess(eval_array)
        eval_array_list.append(eval_array)
        eval_affine_list.append(eval_array_affine)
        eval_box_list.append(Box_boundary)

    with torch.no_grad():
        for sample_path, eval_array, eval_box, eval_affine in zip(sample_list, eval_array_list, eval_box_list,
                                                                  eval_affine_list):
            sample_name = os.path.basename(sample_path)
            predict_file_path = os.path.join(eval_configure.valid_data_root_path,
                                             sample_name, "valid")
            if not os.path.exists(predict_file_path):
                os.mkdir(predict_file_path)
            predict_file_path = os.path.join(predict_file_path, sample_name + ".nii.gz")
            if eval_configure.if_overlap:
                predict_result_list = []
                for configure in model_configure_list:
                    patch_index_list = []
                    # NOTE(review): eval_array was already preprocessed while
                    # loading; this second call assumes preprocess() is
                    # idempotent — confirm before removing it.
                    eval_array = preprocess(eval_array)
                    patch_size = configure.patch_size
                    # Tile with a stride of half the patch size so adjacent
                    # patches overlap; the extra context is cropped off after
                    # prediction.
                    actual_patch_size = [int(i // 2) for i in configure.patch_size]
                    z_num = (eval_box[1] - eval_box[0]) // actual_patch_size[0]
                    x_num = (eval_box[3] - eval_box[2]) // actual_patch_size[1]
                    y_num = (eval_box[5] - eval_box[4]) // actual_patch_size[2]
                    assert z_num > 0
                    assert x_num > 0
                    assert y_num > 0
                    for i in range(z_num + 1):
                        z_start = i * actual_patch_size[0] + eval_box[0]
                        z_end = (i + 1) * actual_patch_size[0] + eval_box[0]
                        if z_end > eval_box[1]:  # the last one: anchor at the box end
                            z_end = eval_box[1]
                            z_start = eval_box[1] - actual_patch_size[0]
                        for j in range(x_num + 1):
                            x_start = j * actual_patch_size[1] + eval_box[2]
                            x_end = (j + 1) * actual_patch_size[1] + eval_box[2]
                            if x_end > eval_box[3]:  # the last one
                                x_end = eval_box[3]
                                x_start = eval_box[3] - actual_patch_size[1]
                            for k in range(y_num + 1):
                                y_start = k * actual_patch_size[2] + eval_box[4]
                                y_end = (k + 1) * actual_patch_size[2] + eval_box[4]
                                if y_end > eval_box[5]:  # the last one
                                    y_end = eval_box[5]
                                    y_start = eval_box[5] - actual_patch_size[2]
                                patch_index_list.append(
                                    [z_start, z_end, x_start, x_end, y_start, y_end])
                    model = configure.model
                    weights_path = configure.weights_path
                    check_point = torch.load(weights_path)
                    model.load_state_dict(check_point)
                    model = model.to(device)
                    model.eval()
                    model_predict = numpy.zeros_like(eval_array)
                    # Context margins that expand a half-size patch to the
                    # full network input size, split (as evenly as possible)
                    # between the two sides of each axis.
                    size_diff_0_0 = int((patch_size[0] - actual_patch_size[0]) / 2)
                    size_diff_0_1 = patch_size[0] - actual_patch_size[0] - size_diff_0_0
                    size_diff_1_0 = int((patch_size[1] - actual_patch_size[1]) / 2)
                    size_diff_1_1 = patch_size[1] - actual_patch_size[1] - size_diff_1_0
                    size_diff_2_0 = int((patch_size[2] - actual_patch_size[2]) / 2)
                    size_diff_2_1 = patch_size[2] - actual_patch_size[2] - size_diff_2_0
                    # Reflect-pad so patches at the volume border still get
                    # full context.
                    eval_array_padding = F.pad(torch.from_numpy(eval_array).unsqueeze(0), [size_diff_2_0,
                                                                                           size_diff_2_1,
                                                                                           size_diff_1_0,
                                                                                           size_diff_1_1,
                                                                                           size_diff_0_0,
                                                                                           size_diff_0_1], "reflect")
                    for eval_index in tqdm(patch_index_list):
                        # In padded coordinates the leading pad cancels the
                        # context shift, so the slice starts at the original
                        # index.  (The original computed `idx - pad + pad`;
                        # simplified here — values are identical.)
                        test_patch_padding = eval_array_padding[0,
                                             eval_index[0]:eval_index[1] + size_diff_0_1 + size_diff_0_0,
                                             eval_index[2]:eval_index[3] + size_diff_1_1 + size_diff_1_0,
                                             eval_index[4]:eval_index[5] + size_diff_2_1 + size_diff_2_0]

                        test_patch_padding = test_patch_padding.to(device, dtype=torch.float32)
                        # Add batch and channel dimensions.
                        test_patch_padding = test_patch_padding.unsqueeze(0).unsqueeze(0)

                        patch_predict_padding = model(test_patch_padding)
                        # Crop the padded context away, keeping only the
                        # central half-size region.
                        patch_predict = patch_predict_padding[:, :,
                                        size_diff_0_0:size_diff_0_0 + actual_patch_size[0],
                                        size_diff_1_0:size_diff_1_0 + actual_patch_size[1],
                                        size_diff_2_0:size_diff_2_0 + actual_patch_size[2]]

                        model_predict[eval_index[0]:eval_index[1], eval_index[2]:eval_index[3],
                        eval_index[4]:eval_index[5]] = patch_predict.cpu().numpy()
                    model_predict = np.where(model_predict > configure.threshold, 1, 0)
                    predict_result_list.append(model_predict)
                # NOTE(review): every model's result is written to the same
                # path, so only the last configure's prediction survives —
                # confirm whether per-model file names were intended.
                for predict_result in predict_result_list:
                    nibabel.Nifti1Image(predict_result, eval_affine).to_filename(predict_file_path)


if __name__ == '__main__':
    # Earlier experiment configs kept for reference:
    # class Modified3DUNetPredictConf:
    #     class_num = 1
    #     if os.environ.get("COMPUTERNAME") == "DESKTOP-16QO4BT":
    #         weights_path = r"H:\laboratory\project\blood_vessel\MICCAI2022-Draft\weights\MUNet_epoch_39"
    #     else:
    #         weights_path = r"C:\gs\code\parse2022-draft\weights\Modified3DUNet(1653108670972311\epoch_1"
    #     model = Modified3DUNet(1, class_num)
    #
    #     if os.environ.get("COMPUTERNAME") == "DESKTOP-16QO4BT":
    #         test_img_path = r"D:\dataset\vessel\Parse_2022_train_data\PA000144"
    #     else:
    #         test_img_path = r"C:\dataset\vessel\Parse_2022_train_data\PA000194"
    #     threshold = 0.95

    # Evaluation-wide settings consumed by do_evaluation(); only
    # if_overlap and valid_data_root_path are read by the current code.
    class predict_configure:
        if_have_label = True
        if_overlap = True
        if_read_preprocess = False
        valid_data_root_path = r"C:\dataset\vessel\Parse2022_valid_data"


    # Per-model settings: inherits the model definition from
    # ModifiedUNet3DConfigure and points at a trained checkpoint.
    class ModifiedUNet3DPredictConf(ModifiedUNet3DConfigure):
        class_num = 1
        weights_path = r"..\weights\Parse2022_AttentionUNet_Aug02-22-21-18\epoch_61global_step_60401"

        threshold = 0.5


    do_evaluation(predict_configure, [ModifiedUNet3DPredictConf])

    # class VoxResNetPredictConf:
    #     from models.VoxResNet import VoxResNet_V0
    #     class_num = 1
    #     weights_path = r"C:\gs\code\parse2022-draft\weights\VoxResNet_V0May30_17-05-44\epoch_39"
    #     model = VoxResNet_V0(1, class_num)
    #     test_img_path = r"D:\dataset\vessel\Parse_2022_train_data\PA000309"
    #     threshold = 0.5
    #

    # class VNetPredictConf:
    #     from models.VNetOfficial import VNet
    #     weights_path = r"C:\gs\code\parse2022-draft\weights\VNet(1653159764375406\epoch_19"
    #     model = VNet()
    #     class_num = 1
    #     test_img_path = r"D:\dataset\vessel\Parse_2022_train_data\PA000309"
    #     threshold = 0.5
    #
    # test_img_list = [r"D:\dataset\vessel\Parse_2022_train_data\PA000309",
    #                  r"D:\dataset\vessel\Parse_2022_train_data\PA000245",
    #                  r"D:\dataset\vessel\Parse_2022_train_data\PA000144"]
    # for test_img_path in test_img_list:
    #     UNet3DPredictConf.test_img_path = test_img_path
    #     model_predict(UNet3DPredictConf)

    # model_predict(UNet3DPredictConf)
    #
    # predict_UNet_Transformer(configure=UNetTransformerConfigure)

    # test_img_path = r"D:\dataset\vessel\Parse_2022_train_data\PA000245"
    # ModifiedUNet3d_predict()
