# This example is to test the segmentation evaluation metrics originally for the parcel segmentation task.
# These metrics are very suitable when segmentation contours are provided,
# but segmentation classes are not given or not emphasized.

# For example, in the parcel segmentation task, we don't have each segmentation's class,
# so we cannot use the mIoU metric to evaluate the segmentation result.
# We are able to compare the consistency between the segmentation result and the ground truth.

# The essential step is to compile and install the C++ library using `common/ml_utils/seg_eval/setup.py`
# After installation, you can successfully `import EvalSPModule`. For the tentative use,
# you can use `/media/workspace/zhicheng/my_envs/venv_20211020/bin/python` as the Python interpreter,
# which has installed the compiled `EvalSPModule` and the third-party library `fastremap`.

# TODO: Hangzong will integrate this `setup.py` with the general `setup.py`


def test_function():
    """Exercise the C++ segmentation-evaluation bindings on hand-checked inputs.

    Runs three metric families from ``EvalSPModule`` on a small 12x5
    superpixel/ground-truth label pair and prints the results next to
    hand-computed expectations (see the OUTPUT block at the bottom):

    1. ASA (Achievable Segmentation Accuracy) and the Bergh
       undersegmentation error (``1 - ASA``).
    2. Segmentation category counts (undersegmented / one-to-one /
       oversegmented ground-truth regions) at several overlap thresholds.
    3. Boundary recall and average boundary-pixel error at several
       search radii.

    Requires the compiled ``EvalSPModule`` extension and the third-party
    ``fastremap`` package (see the module header for installation notes).
    """
    import numpy as np
    import EvalSPModule
    import fastremap

    def _renumber_consecutive(labels):
        # To correctly calculate the metrics, the labels must be relabeled as
        # consecutive integers starting from 0.  fastremap.renumber leaves 0
        # untouched by default (preserve_zero), so when a 0 label is present we
        # renumber the remaining labels from 1 (yielding 0..N overall);
        # otherwise we renumber everything from 0.
        start = 1 if np.any(labels == 0) else 0
        relabeled, _ = fastremap.renumber(labels, start=start, in_place=False)
        return relabeled

    def _report_seg_counts(perc, suffix=""):
        # Compute and print the segmentation category counts for one overlap
        # threshold `perc`.  `suffix` lets the caller append a trailing newline
        # to the last report without changing the printed format.
        print("Overlap thresh is: {}".format(1 - perc))
        (
            nr_underseg,
            nr_one_to_one,
            nr_overseg,
            nr_gt,
        ) = EvalSPModule.computeSegCatCounts(
            label_list, gtseg_list, perc, 0
        )  # the last parameter is for returning the label map, default is 0
        print(
            (
                "underseg is: {} | one-to-one is {} | overseg is {} | gt is {}"
                + suffix
            ).format(nr_underseg, nr_one_to_one, nr_overseg, nr_gt)
        )

    def _report_br(search_radius):
        # Compute and print boundary recall (on GT and on SP) and the average
        # boundary-pixel error of true positives for one search radius.
        (
            br_gt,
            br_sp,
            average_error_pixel_gt,
            average_error_pixel_sp,
        ) = EvalSPModule.computeBR(label_list, gtseg_list, h, w, search_radius)
        print("Searching radius is: {}".format(search_radius))
        print(
            "boundary recall on GT: {}% | boundary recall on SP: {}%".format(
                round(br_gt * 100, 4), round(br_sp * 100, 4)
            )
        )
        print(
            "average error pixels of TP on GT: {} | average error pixels of TP on SP: {}".format(
                round(average_error_pixel_gt, 4), round(average_error_pixel_sp, 4)
            )
        )

    # Ground-truth segmentation labels (12x5); region 0 is present on purpose
    # to exercise the zero-preserving renumber path.
    sample_gt_labels = np.array(
        [
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 0, 0],
            [1, 1, 1, 0, 0],
            [2, 2, 2, 2, 2],
            [2, 2, 2, 2, 2],
            [2, 2, 2, 2, 2],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3],
        ]
    )

    # Superpixel (predicted) segmentation labels (12x5).
    sample_sp_labels = np.array(
        [
            [3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
            [2, 2, 2, 2, 2],
            [2, 2, 2, 2, 2],
        ]
    )

    sample_sp_labels = _renumber_consecutive(sample_sp_labels)
    sample_gt_labels = _renumber_consecutive(sample_gt_labels)

    h, w = sample_sp_labels.shape

    r"""
    Verify ASA implementation (Achievable Segmentation Accuracy)
    ASA(G, S) = 1/N * \sum_{S_j} \max_{G_i} \{\| S_j \cap G_i \|\}
    """

    # 1. ASA = (13+20+10)/60 = 0.7167
    label_list = sample_sp_labels.flatten().tolist()
    gtseg_list = sample_gt_labels.flatten().tolist()

    asa = EvalSPModule.computeASA(
        label_list, gtseg_list, 0
    )  # the last parameter is for returning the label map, default is 0
    print("ASA is: {}".format(round(asa, 4)))
    print("Bergh Underseg Error is: {}".format(round(1 - asa, 4)))

    # 2. ASA_2 = (13+20+9)/60 = 0.7
    sample_sp_labels_2 = sample_sp_labels.copy()
    sample_gt_labels_2 = sample_gt_labels.copy()
    sample_gt_labels_2[-1][-1] = 10  # change one pixel with a new label
    sample_gt_labels_2 = _renumber_consecutive(sample_gt_labels_2)
    label_list_2 = sample_sp_labels_2.flatten().tolist()
    gtseg_list_2 = sample_gt_labels_2.flatten().tolist()

    asa_2 = EvalSPModule.computeASA(label_list_2, gtseg_list_2, 0)
    print("ASA_2 is: {}".format(round(asa_2, 4)))
    print("Bergh Underseg Error is: {}".format(round(1 - asa_2, 4)))

    """
    Verify segmentation counts (underseg, one-to-one, overseg)
    A threshold (perc_{}) must be given that describes the overlap percentage over GT.
    If overlap_area < (1 - perc_{}), then overseg += 1.
    If overlap_area >= (1 - perc_{}) AND sp_area over gt_area < (1 + perc_{}), then one-to-one += 1.
    If overlap_area >= (1 - perc_{}) AND sp_area over gt_area >= (1 + perc_{}), then underseg += 1.
    If two SPs overlap one GT equally, e.g. Both 0s in SPs and 2s in SPs overlap 3s in GT are 0.5,
    select the smaller one, i.e. 2s in SPs, for sp_area over gt_area calculation
    """

    # 0s overlap_area over gt_area is 0.5 (2/4); sp_area over gt_area is 3.75 (15/4)
    # 1s overlap_area over gt_area is 0.8125 (13/16); sp_area over gt_area is 0.9375 (15/16)
    # 2s overlap_area over gt_area is 1 (20/20); sp_area over gt_area is 1.75 (35/20)
    # 3s overlap_area over gt_area is 0.5 (10/20); sp_area over gt_area is 0.5 (10/20)

    perc_1 = 0.1
    # In gt labels,
    # 0s are over (0.5 < 1-0.1),
    # 1s are over (0.8125 < 1-0.1),
    # 2s are under (1 >= 1-0.1; 1.75 >= 1+0.1),
    # 3s are over (0.5 < 1-0.1)
    _report_seg_counts(perc_1)

    perc_2 = 0.3
    # In gt labels,
    # 0s are over (0.5 < 1-0.3),
    # 1s are one-to-one (0.8125 > 1-0.3; 0.9375 < 1+0.3),
    # 2s are under (1 >= 1-0.3; 1.75 >= 1+0.3),
    # 3s are over (0.5 < 1-0.3)
    _report_seg_counts(perc_2)

    perc_3 = 0.5
    # In gt labels,
    # 0s are under (0.5 >= 1-0.5; 3.75 > 1+0.5),
    # 1s are one-to-one (0.8125 > 1-0.5; 0.9375 < 1+0.5),
    # 2s are under (1 >= 1-0.5; 1.75 >= 1+0.5),
    # 3s are one-to-one (0.5 >= 1-0.5; 0.5 < 1+0.5),
    _report_seg_counts(perc_3, suffix="\n")

    """
    Verify BR implementation (Boundary Recall)
    First calculate the boundary maps of GT/SP.
    For a given radius d, and a given target boundary pixel of GT/SP,
    look up whether there is at least one boundary pixel of SP/GT in the dxd matrix.
    If at least one label from SP/GT hits, calculate the distance from this target
    pixel of GT/SP to its nearest pixel in the candidate matrix of SP/GT (dxd).
    """

    search_radius = 1
    # Look up the surrounding 1 pixel, so [3x3] matrix
    # GT's boundary recall = 8 hit / 13 all = 0.6153846
    # SP's boundary recall = 5 hit / 10 all = 0.5
    # Average error pixels of TP on GT = (1 pixel-dist * 7 pixels + 0 pixel-dist * 1 pixel) / 8 pixels = 7/8 = 0.875
    # Average error pixels of TP on SP = (1 pixel-dist * 4 pixels + 0 pixel-dist * 1 pixel) / 5 pixels = 4/5 = 0.8
    _report_br(search_radius)

    search_radius = 2
    # Look up the surrounding 2 pixels, so [5x5] matrix
    # GT's boundary recall = 13 hit / 13 all = 1
    # SP's boundary recall = 10 hit / 10 all = 1
    # Average error pixels of TP on GT =
    # (1 pixel-dist * 7 pixels + 0 pixel-dist * 1 pixel + 2 pixel-dist * 5) / 13 pixels = 17/13 = 1.3076923
    # Average error pixels of TP on SP =
    # (1 pixel-dist * 4 pixels + 0 pixel-dist * 1 pixel + 2 pixel-dist * 5) / 10 pixels = 14/10 = 1.4
    _report_br(search_radius)

    search_radius = 3
    # Look up the surrounding 3 pixels, so [7x7] matrix
    # GT's boundary recall = 13 hit / 13 all = 1
    # SP's boundary recall = 10 hit / 10 all = 1
    # Average error pixels of TP on GT =
    # (1 pixel-dist * 7 pixels + 0 pixel-dist * 1 pixel + 2 pixel-dist * 5) / 13 pixels = 17/13 = 1.3076923
    # Average error pixels of TP on SP =
    # (1 pixel-dist * 4 pixels + 0 pixel-dist * 1 pixel + 2 pixel-dist * 5) / 10 pixels = 14/10 = 1.4
    _report_br(search_radius)

    # OUTPUT
    """
    ASA is: 0.7167
    Bergh Underseg Error is: 0.2833
    ASA_2 is: 0.7
    Bergh Underseg Error is: 0.3
    Overlap thresh is: 0.9
    underseg is: 1 | one-to-one is 0 | overseg is 3 | gt is 4
    Overlap thresh is: 0.7
    underseg is: 1 | one-to-one is 1 | overseg is 2 | gt is 4
    Overlap thresh is: 0.5
    underseg is: 2 | one-to-one is 2 | overseg is 0 | gt is 4

    Searching radius is: 1
    boundary recall on GT: 61.5385% | boundary recall on SP: 50.0%
    average error pixels of TP on GT: 0.875 | average error pixels of TP on SP: 0.8
    Searching radius is: 2
    boundary recall on GT: 100.0% | boundary recall on SP: 100.0%
    average error pixels of TP on GT: 1.3077 | average error pixels of TP on SP: 1.4
    Searching radius is: 3
    boundary recall on GT: 100.0% | boundary recall on SP: 100.0%
    average error pixels of TP on GT: 1.3077 | average error pixels of TP on SP: 1.4
    """


if __name__ == "__main__":
    # Run the metric verification manually; compare the printed values against
    # the expected OUTPUT block at the bottom of test_function.
    test_function()
