import glob

import SimpleITK
import nibabel
import numpy
import numpy as np
import torch.nn.functional as F
import torchio
from tqdm import tqdm

from config.XunFeiChallenge_config import *
from models.EvaluationMetrics import *
from utils.DataUtils import preprocess
from utils.ThresholdSegmentation import *
from utils.Utils import saveMhdFile


def threshold_predict(sample_path):
    """Placeholder for a pure threshold-based baseline predictor.

    Not implemented: the body only binds a candidate intensity threshold
    and returns None — presumably meant to grow into a non-learned
    baseline segmentation; confirm before relying on it.
    """
    threshold = 100  # candidate global intensity threshold; currently unused


def _normalize_for_model(configure, image_array):
    # "Modified" models were trained on z-normalized volumes; every other
    # model goes through the shared project preprocessing pipeline.
    model_header = configure.model.__str__().split("\n")[0].replace("(", "")
    if "Modified" in model_header:
        return torchio.ZNormalization()(numpy.expand_dims(image_array, axis=0))[0]
    return preprocess(image_array)


def _load_model(configure, device):
    # Restore the trained weights and put the network into inference mode.
    model = configure.model
    model.load_state_dict(torch.load(configure.weights_path))
    model = model.to(device)
    model.eval()
    return model


def _build_patch_index_list(original_shape, step_size):
    """Tile a (z, x, y) volume into step_size windows.

    The last window along each axis is shifted backwards so that it ends
    exactly at the volume border (windows may therefore overlap there).
    Returns a list of [z_start, z_end, x_start, x_end, y_start, y_end].
    """
    index_list = []
    z_num = int(np.ceil(original_shape[0] / step_size[0]))
    x_num = int(np.ceil(original_shape[1] / step_size[1]))
    y_num = int(np.ceil(original_shape[2] / step_size[2]))
    for i in range(z_num):
        z_start = i * step_size[0]
        z_end = (i + 1) * step_size[0]
        if z_end > original_shape[0]:  # last window: clamp to the border
            z_end = original_shape[0]
            z_start = original_shape[0] - step_size[0]
        for j in range(x_num):
            x_start = j * step_size[1]
            x_end = (j + 1) * step_size[1]
            if x_end > original_shape[1]:  # last window: clamp to the border
                x_end = original_shape[1]
                x_start = original_shape[1] - step_size[1]
            for k in range(y_num):
                y_start = k * step_size[2]
                y_end = (k + 1) * step_size[2]
                if y_end > original_shape[2]:  # last window: clamp to the border
                    y_end = original_shape[2]
                    y_start = original_shape[2] - step_size[2]
                index_list.append([z_start, z_end, x_start, x_end, y_start, y_end])
    return index_list


def do_predict(predict_configure, model_configures, sample_path):
    """Run patch-wise segmentation prediction for one sample, optionally as an ensemble.

    Args:
        predict_configure: flag holder with `if_have_label` (ground truth
            available), `if_read_preprocess` (read preprocessed .nii.gz instead
            of raw .mhd) and `if_overlap` (half-step sliding window fed with
            reflect-padded full-size context) attributes.
        model_configures: a single model configure, or a list of them for an
            ensemble; each carries `model`, `weights_path`, `patch_size` and
            `threshold`.
        sample_path: directory containing this sample's image (and label) files.

    Returns:
        (simple_dsc, hysteresis_dsc) when labels are available, otherwise None
        (the hysteresis-thresholded prediction is written to disk instead).
    """
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
    device = torch.device('cuda')  # NOTE(review): assumes a CUDA GPU is present

    sample_name = os.path.basename(sample_path)
    if not isinstance(model_configures, list):
        # Single model: derive the output naming from its weights path, then
        # normalize to a one-element list so the prediction loops below work
        # unchanged (the original crashed when handed a bare configure).
        epoch_name = os.path.basename(model_configures.weights_path)
        dataset_name, model_name, _ = os.path.basename(
            os.path.dirname(os.path.abspath(model_configures.weights_path))).split("_")
        model_configures = [model_configures]
    else:
        epoch_name = "000"
        dataset_name, model_name, _ = os.path.basename(
            os.path.dirname(os.path.abspath(model_configures[0].weights_path))).split("_")
        model_name = "multiple"

    print(sample_name)
    if predict_configure.if_read_preprocess:
        test_data_path = os.path.join(sample_path, "MRA.nii.gz")
    else:
        sub_dir = "image" if predict_configure.if_have_label else "test"
        test_data_path = os.path.join(sample_path, sub_dir, sample_name + ".mhd")

    test_image_head_file = None
    if not predict_configure.if_read_preprocess:
        # Raw data: read the .mhd volume and apply the brain mask stored two
        # directory levels up next to the data set.
        test_image_head_file = SimpleITK.ReadImage(test_data_path)
        test_image_array = SimpleITK.GetArrayFromImage(test_image_head_file)
        mask_dir = "TrainMask" if predict_configure.if_have_label else "TestMask"
        mask_nii_img = nibabel.load(
            os.path.join(sample_path, "..", "..", mask_dir, sample_name + ".nii.gz"))
        # get_fdata() replaces nibabel's removed get_data(); plain int replaces
        # np.int, which no longer exists in modern numpy.
        mask = mask_nii_img.get_fdata().astype(int).transpose((2, 1, 0))
        test_image_array = test_image_array * mask
    else:
        test_nii_img = nibabel.load(test_data_path)
        test_image_array = test_nii_img.get_fdata().astype(int).transpose((2, 1, 0))
        mask_nii_img = nibabel.load(os.path.join(sample_path, "mask.nii.gz"))
        mask = mask_nii_img.get_fdata().astype(int).transpose((2, 1, 0))
        test_image_array = test_image_array * mask

    print(np.max(test_image_array))
    print(np.min(test_image_array))

    label = None
    label_nii_img = None
    if predict_configure.if_have_label:
        if not predict_configure.if_read_preprocess:
            label_path = os.path.join(sample_path, "label", sample_name + ".mhd")
            label = SimpleITK.GetArrayFromImage(SimpleITK.ReadImage(label_path))
        else:
            label_path = os.path.join(sample_path, "label.nii.gz")
            label_nii_img = nibabel.load(label_path)
            label = label_nii_img.get_fdata().transpose((2, 1, 0)).astype(int)

    # Output directory. The original branched on COMPUTERNAME but both
    # branches were identical, so the host check is dropped.
    if predict_configure.if_have_label:
        predict_root_path = r"D:\dataset\vessel\xunfei_challenge_upload\train_labels_experiment"
    else:
        predict_root_path = r"D:\dataset\vessel\xunfei_challenge_upload\labels"

    with torch.no_grad():
        predict_list = []
        if predict_configure.if_overlap:
            # Overlapping mode: slide a half-patch-size window over the volume
            # but feed the network a full-size patch taken from a reflect-padded
            # copy, keeping only the central (core) part of each prediction.
            for configure in model_configures:
                test_image = _normalize_for_model(configure, test_image_array)
                patch_size = configure.patch_size
                actual_patch_size = [int(i // 2) for i in patch_size]
                test_index_list = _build_patch_index_list(test_image.shape, actual_patch_size)
                model = _load_model(configure, device)
                model_predict = numpy.zeros_like(test_image)
                # Split the full/core size difference into (front, back) pads
                # for each of the three spatial axes.
                size_diff_0_0 = (patch_size[0] - actual_patch_size[0]) // 2
                size_diff_0_1 = patch_size[0] - actual_patch_size[0] - size_diff_0_0
                size_diff_1_0 = (patch_size[1] - actual_patch_size[1]) // 2
                size_diff_1_1 = patch_size[1] - actual_patch_size[1] - size_diff_1_0
                size_diff_2_0 = (patch_size[2] - actual_patch_size[2]) // 2
                size_diff_2_1 = patch_size[2] - actual_patch_size[2] - size_diff_2_0
                test_image_padding = F.pad(
                    torch.from_numpy(test_image).unsqueeze(0),
                    [size_diff_2_0, size_diff_2_1,
                     size_diff_1_0, size_diff_1_1,
                     size_diff_0_0, size_diff_0_1], "reflect")
                for test_index in tqdm(test_index_list):
                    # Core window plus its margins. In the padded volume the
                    # front pad shifts every index forward and cancels against
                    # the core start offset (the original subtracted and
                    # re-added the same pad).
                    test_patch_padding = test_image_padding[
                                         0,
                                         test_index[0]:test_index[1] + size_diff_0_0 + size_diff_0_1,
                                         test_index[2]:test_index[3] + size_diff_1_0 + size_diff_1_1,
                                         test_index[4]:test_index[5] + size_diff_2_0 + size_diff_2_1]
                    test_patch_padding = test_patch_padding.to(device, dtype=torch.float32)
                    test_patch_padding = test_patch_padding.unsqueeze(0).unsqueeze(0)
                    patch_predict_padding = model(test_patch_padding)
                    # Crop the prediction back to the un-padded core window.
                    patch_predict = patch_predict_padding[
                                    :, :,
                                    size_diff_0_0:size_diff_0_0 + actual_patch_size[0],
                                    size_diff_1_0:size_diff_1_0 + actual_patch_size[1],
                                    size_diff_2_0:size_diff_2_0 + actual_patch_size[2]]
                    model_predict[test_index[0]:test_index[1],
                                  test_index[2]:test_index[3],
                                  test_index[4]:test_index[5]] = patch_predict.cpu().numpy()
                predict_list.append(model_predict)
        else:
            # Non-overlapping mode: plain full-patch tiling.
            for configure in model_configures:
                test_image = _normalize_for_model(configure, test_image_array)
                test_index_list = _build_patch_index_list(test_image.shape, configure.patch_size)
                model = _load_model(configure, device)
                model_predict = numpy.zeros_like(test_image)
                for test_index in tqdm(test_index_list):
                    test_patch = test_image[test_index[0]:test_index[1],
                                            test_index[2]:test_index[3],
                                            test_index[4]:test_index[5]]
                    test_patch = torch.from_numpy(test_patch).to(device, dtype=torch.float32)
                    test_patch = test_patch.unsqueeze(0).unsqueeze(0)
                    patch_predict = model(test_patch)
                    model_predict[test_index[0]:test_index[1],
                                  test_index[2]:test_index[3],
                                  test_index[4]:test_index[5]] = patch_predict.cpu().numpy()
                predict_list.append(model_predict)

    ht_predict_result = None
    si_predict_result = None
    if len(predict_list) == 1:
        predict_result = predict_list[0]
        ht_predict_result = hysteresis_threshold(predict_result)
        si_predict_result = simple_threshold(predict_result, model_configures[0].threshold)
    elif len(predict_list) > 1:
        simple_predict_list = []
        ht_predict_list = []
        for i in range(len(model_configures)):
            # Bug fix: threshold each model's probability map with ITS OWN
            # configured threshold (the original reused the loop-leaked
            # `configure`, i.e. the last model's threshold, for every model).
            simple_predict_list.append(np.expand_dims(
                simple_threshold(predict_list[i], model_configures[i].threshold), axis=0))
            ht_predict_list.append(np.expand_dims(hysteresis_threshold(predict_list[i]), axis=0))
        si_predict_result = np.sum(numpy.concatenate(simple_predict_list, axis=0), axis=0)
        ht_predict_result = np.sum(numpy.concatenate(ht_predict_list, axis=0), axis=0)
        # Majority vote: a voxel is foreground when at least two models agree.
        si_predict_result = np.where(si_predict_result >= 2, 1, 0)
        ht_predict_result = np.where(ht_predict_result >= 2, 1, 0)

    if predict_configure.if_have_label:
        last_threshold = model_configures[-1].threshold
        sim_dsc = DSC(si_predict_result, label, last_threshold)
        print("After simple threshold, the DSC is " + str(sim_dsc))
        hy_des = DSC(ht_predict_result, label, last_threshold)
        print("After hysteresis threshold, the DSC is " + str(hy_des))

        for tag, prediction in (("simple", si_predict_result),
                                ("hysteresis", ht_predict_result)):
            predict_correct, predict_omit, predict_redundant = get_wrong_voxels(prediction, label)
            # Encode the error analysis into one volume:
            # 1 = correct, 2 = omitted (false negative), 3 = redundant (false positive).
            final_predict_result = predict_correct + 2 * predict_omit + 3 * predict_redundant
            print(tag + ":  correct:" + str(numpy.sum(predict_correct)) +
                  "  omit:" + str(numpy.sum(predict_omit)) +
                  "  redundant:" + str(numpy.sum(predict_redundant)))
            out_name = dataset_name + "_" + model_name + "_" + epoch_name + "_" + sample_name + "_" + tag
            if not predict_configure.if_read_preprocess:
                saveMhdFile(test_image_head_file, final_predict_result,
                            os.path.join(predict_root_path, out_name + ".mhd"))
            else:
                ni_img = nibabel.Nifti1Image(final_predict_result.transpose((2, 1, 0)),
                                             label_nii_img.affine)
                nibabel.save(ni_img, os.path.join(predict_root_path, out_name + ".nii.gz"))
        return sim_dsc, hy_des
    else:
        # No ground truth: only persist the hysteresis-thresholded result.
        # NOTE(review): when if_read_preprocess is set, test_image_head_file is
        # still None here — confirm saveMhdFile tolerates that.
        saveMhdFile(test_image_head_file, ht_predict_result,
                    os.path.join(predict_root_path, sample_name + '.mhd'))
        return None


def predict(p):
    """Evaluate or run one trained model (weights at path *p*) on the data set.

    When labels are available, prints the mean simple- and hysteresis-threshold
    DSC over all samples; otherwise only writes the predictions to disk.
    """
    class PredictConf(UNet3DConfigure):
        if_have_label = True
        if_read_preprocess = False
        data_root_path = r"D:\dataset\vessel\xunfei_challenge_mri"
        if if_have_label:
            if if_read_preprocess:
                test_image_path = os.path.join(data_root_path, "TrainingdataPreprocessing")
            else:
                test_image_path = os.path.join(data_root_path, "Trainingdata")
        else:
            if if_read_preprocess:
                test_image_path = os.path.join(data_root_path, "TestingdataPreprocessing")
            else:
                test_image_path = os.path.join(data_root_path, "Testingdata")

        crop_method = 2
        weights_path = p

    # Bug fix: do_predict takes (predict_configure, model_configures,
    # sample_path); the original call dropped the model configure and raised a
    # TypeError on every sample. PredictConf serves both roles, wrapped in a
    # list because the prediction loop iterates over the model configures.
    # NOTE(review): PredictConf must expose `if_overlap` (presumably inherited
    # from UNet3DConfigure) — confirm against the config module.
    sample_list = glob.glob(os.path.join(PredictConf.test_image_path, "*"))
    if PredictConf.if_have_label:
        total_simple_dsc = 0.0
        total_hy_dsc = 0.0
        for sample_path in sample_list:
            tmp_result = do_predict(PredictConf, [PredictConf], sample_path)
            total_simple_dsc += tmp_result[0]
            total_hy_dsc += tmp_result[1]
        avg_simple_dsc = total_simple_dsc / len(sample_list)
        avg_hy_dsc = total_hy_dsc / len(sample_list)
        print(os.path.basename(p) + ": " + str(avg_simple_dsc))
        print(os.path.basename(p) + ": " + str(avg_hy_dsc))
    else:
        for sample_path in sample_list:
            do_predict(PredictConf, [PredictConf], sample_path)


def find_best_epoch():
    """Run `predict` on every checkpoint in the hard-coded weights directory."""
    checkpoint_root = r"C:\gs\code\parse2022-draft\weights\Training dataModified3DUNetJul10-12-13-57"
    for checkpoint_path in glob.glob(os.path.join(checkpoint_root, "*")):
        predict(checkpoint_path)


def multi_predict():
    """Ensemble prediction: three trained networks vote on every voxel."""
    class predict_configure:
        if_have_label = True
        if_overlap = True
        if_read_preprocess = False

    data_root_path = r"D:\dataset\vessel\xunfei_challenge_mri"
    if predict_configure.if_have_label:
        sub_dir = ("TrainingdataPreprocessing" if predict_configure.if_read_preprocess
                   else "Trainingdata")
    else:
        sub_dir = ("TestingdataPreprocessing" if predict_configure.if_read_preprocess
                   else "Testingdata")
    ensemble_image_path = os.path.join(data_root_path, sub_dir)

    class TransformerConf(UNetTransformerConfigure):
        crop_method = 2
        test_image_path = ensemble_image_path
        weights_path = r"..\weights\Trainingdata_UNETR_Jul29-00-07-16\epoch_375"

    class ResidualConf(UNet3DConfigure):
        crop_method = 2
        test_image_path = ensemble_image_path
        weights_path = r"..\weights\Trainingdata_ResidualUNet3D_Jul25-10-28-44\epoch_85"

    class AttentionConf(AttentionUNetConfigure):
        crop_method = 2
        test_image_path = ensemble_image_path
        weights_path = r"..\weights\Trainingdata_AttentionUNet_Jul20-10-40-20\epoch_678"

    # Voting order: attention U-Net, residual U-Net, UNETR.
    configure_list = [AttentionConf, ResidualConf, TransformerConf]
    sample_list = glob.glob(os.path.join(ensemble_image_path, "*"))
    if predict_configure.if_have_label:
        simple_dsc_sum = 0.0
        hysteresis_dsc_sum = 0.0
        for sample_path in sample_list:
            simple_dsc, hysteresis_dsc = do_predict(predict_configure, configure_list, sample_path)
            simple_dsc_sum += simple_dsc
            hysteresis_dsc_sum += hysteresis_dsc
        print("multiple: " + str(simple_dsc_sum / len(sample_list)))
        print("multiple: " + str(hysteresis_dsc_sum / len(sample_list)))
    else:
        for sample_path in sample_list:
            do_predict(predict_configure, configure_list, sample_path)


if __name__ == '__main__':
    # Script entry point: run the three-model ensemble prediction.
    multi_predict()
