"""
All things related to line detection evaluation
"""
import torch
import os
import numpy as np
import time
import math

from evaluation.eval_utils import get_dilation_kernel, convert_raw_exported_predictions, line_map_to_segments, \
    line_score_map_to_segments
from third_party.lcnn.lcnn.metric import msTPFP, ap
from model.metrics import super_nms
from torch.nn.functional import softmax
from train import convert_junc_predictions
from dataset.transforms.homographic_transforms import compute_valid_mask
import copy
import cv2
from misc.visualize_util import plot_junctions, plot_line_segments_from_segments
from model.line_detector_val import (
    LineDetectorOurs2,
)
from utils import parse_h5_data
# Import the geometry utils
from misc.geometry_utils import get_overlap_orth_line_dist
from scipy.ndimage.morphology import binary_erosion

# [Debug] import the plotting library for debug
import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt


##############################
## mAPH simple related code ##
##############################
# Container for heatmap evaluation results
class LineDetectResult_mAPH_simple(object):
    """Accumulates per-image TP/FP counts and computes the PR curve / mAP."""

    def __init__(self, num_intervals=1000):
        self.num_intervals = num_intervals
        # Accumulated counts. Precision and recall are counted in different
        # ways because the distance tolerance is applied on different sides.
        self.data = {
            "tp_precision": np.zeros([num_intervals, 1]),
            "fp_precision": np.zeros([num_intervals, 1]),
            "tp_recall": np.zeros([num_intervals, 1]),
            "num_points": 0,
        }

    def update(self, line_res):
        """Fold one image's counts (output of the TPFP computation) into the totals."""
        for field in ("tp_precision", "fp_precision", "tp_recall"):
            self.data[field] += line_res[field]
        self.data["num_points"] += line_res["num_gt"]

    def compute_pr(self):
        """Return the padded precision/recall curves and the resulting mAP."""
        counts = {key: np.squeeze(self.data[key], axis=-1)
                  for key in ("tp_precision", "fp_precision", "tp_recall")}

        precision = self.div0(counts["tp_precision"],
                              counts["tp_precision"] + counts["fp_precision"])
        recall = self.div0(counts["tp_recall"], self.data["num_points"])

        # Pad the curve endpoints for plotting, then enforce a monotonically
        # non-increasing precision (running maximum from the high-recall end).
        recall = np.concatenate([[0], recall, [1]])
        precision = np.concatenate([[0], precision, [0]])
        precision = np.maximum.accumulate(precision[::-1])[::-1]

        return {
            "precision": precision,
            "recall": recall,
            "mAP": self.compute_mAP(precision, recall),
        }

    @staticmethod
    def compute_mAP(precision, recall):
        """Area under the PR curve via the rectangle rule."""
        return np.sum(precision[1:] * (recall[1:] - recall[:-1]))

    @staticmethod
    def div0(a, b):
        """Element-wise a / b mapping 0/0 to 1 and x/0 (x != 0) to 0."""
        with np.errstate(divide='ignore', invalid='ignore'):
            ratio = np.true_divide(a, b)
            bad = ~np.isfinite(ratio)  # -inf, inf, NaN
            ratio[bad] = np.where(a[bad] == 0, 1, 0)
        return ratio


# Plot line segments given points and line map
def line_segments_to_heatmap(points, line_map, image_size):
    """Rasterize a junction list + connectivity map into a line heatmap.

    points: Nx2 array of junction coordinates (HW format).
    line_map: NxN connectivity map; entry (i, j) == 1 means points i and j
        are connected. Symmetric entries are consumed in pairs so each
        segment is drawn once.
    image_size: (H, W) of the output heatmap.

    Returns an (H, W) float array with drawn line pixels set to 255.
    """
    # Work on a copy: entries are zeroed out as segments are consumed.
    line_map_tmp = np.array(line_map, copy=True)

    # Parse line_map back to segment pairs.
    # Collect into a list instead of np.concatenate in a loop (which is O(n^2)).
    segments = []
    for idx in range(points.shape[0]):
        for idx2 in np.where(line_map_tmp[idx, :] == 1)[0]:
            p1 = points[idx, :]  # HW format
            p2 = points[idx2, :]  # HW format
            segments.append([p1[0], p1[1], p2[0], p2[1]])

            # Clear both directions so the segment is not drawn twice.
            line_map_tmp[idx, idx2] = 0
            line_map_tmp[idx2, idx] = 0

    # Initialize empty heatmap
    line_heatmap = np.zeros(image_size)

    # Draw segment pairs (cv2 expects x, y order, hence the coordinate swap)
    for seg in segments:
        start_point = tuple(np.round((seg[1], seg[0])).astype(np.int32))
        end_point = tuple(np.round((seg[3], seg[2])).astype(np.int32))
        line_heatmap = cv2.line(line_heatmap, start_point, end_point, (255., 255., 255.), thickness=1)

    return line_heatmap


# # Get the dilation kernel based on the distance threshold
# def get_dilation_kernel(dist_thresh):
#     # Initialize empty kernel
#     kernel_len = round(2 * dist_thresh + 1)
#     output_kernel = np.zeros([kernel_len, kernel_len])
#     center = np.array([[dist_thresh, dist_thresh]])

#     # Intitialize the meshgrid
#     xv, yv = np.meshgrid(np.arange(0, kernel_len, 1),
#                          np.arange(0, kernel_len, 1),
#                          indexing='xy')
#     coord_map = np.concatenate([yv[..., None], xv[..., None]], axis=-1)
#     coord_map_flattened = coord_map.reshape([-1, 2])
#     dist = np.sqrt(np.sum((coord_map_flattened - center) **2, axis=-1))
#     coord_valid = coord_map_flattened[dist <= dist_thresh, :]

#     output_kernel[coord_valid[:, 0], coord_valid[:, 1]] =1

#     return output_kernel.astype(np.uint8)


# Compute tp fp for line prediction
def compute_mAPH_simple_TPFP(line_detector, junc_pred, heatmap_pred, valid_mask,
                             heatmap_gt, dist_thresh_ratio=0.01, num_intervals=100):
    """
    line_detector: A partially configured detector module (with some hyper-parameter fixed).
    junc_pred: Nx2 array of all predicted junctions (HW format)
    heatmap_pred: Dense prob map with same size as image.
    valid_mask: Image-sized mask for valid region (currently unused; kept for
        interface compatibility).
    heatmap_gt: Dense ground truth heatmap with same size as image.
    dist_thresh_ratio: Distance tolerance ratio (ratio to the diagonal length of the image).
    num_intervals: Number of intervals used to compute the precision recall curve.

    Returns a dict of per-threshold count arrays plus the GT pixel count,
    matching the input format of LineDetectResult_mAPH_simple.update().
    """
    # Keep only the "line" channel of the prediction.
    heatmap_pred = heatmap_pred[..., 1]
    heatmap_gt = heatmap_gt.astype(np.uint8)

    # Compute the distance threshold based on the length of image diagonal
    h, w = heatmap_pred.shape
    dist_thresh = round(((h ** 2 + w ** 2) ** 0.5) * dist_thresh_ratio)

    # Per-threshold accumulators; thresholds are swept from high to low so
    # the number of detections (and thus recall) grows along the curve.
    tp_precision_array = np.zeros([num_intervals, 1])
    fp_precision_array = np.zeros([num_intervals, 1])
    tp_recall_array = np.zeros([num_intervals, 1])
    thresh_intervals = np.linspace(0.95, 0.05, num_intervals, endpoint=True)

    # Dilate the GT once. Half the tolerance is applied on each side
    # (GT for precision, prediction for recall).
    dilation_kernel = get_dilation_kernel(0.5 * dist_thresh)
    heatmap_gt_dilated = cv2.dilate(heatmap_gt, dilation_kernel, iterations=1)
    num_gt = np.sum(heatmap_gt)

    # Iterate through all the thresholds.
    for idx, thresh in enumerate(thresh_intervals):
        # Change the detection threshold and re-detect.
        line_detector.detection_thresh = thresh
        line_connect_map_pred = line_detector.forward(junc_pred, heatmap_pred)

        # Convert line_map + junction map => line prediction map (pixels = 255).
        line_map_pred = line_segments_to_heatmap(junc_pred, line_connect_map_pred,
                                                 [h, w])

        # Precision: predicted pixels that fall on the dilated GT.
        # Both tp and fp carry the same 255 scale, so the ratio is unaffected.
        tp_precision = np.sum(line_map_pred * heatmap_gt_dilated)
        fp_precision = np.sum(line_map_pred) - tp_precision

        # Recall: GT pixels covered by the dilated (re-binarized) prediction.
        line_map_pred_dilated = cv2.dilate(line_map_pred.astype(np.uint8), dilation_kernel, iterations=1)
        line_map_pred_dilated = (line_map_pred_dilated / 255.).astype(np.uint8)
        # NOTE(review): heatmap_gt is indexed as HxWxC here but summed over all
        # channels for num_gt above — confirm the expected GT shape.
        tp_recall = np.sum(heatmap_gt[..., 0] * line_map_pred_dilated)

        # Record the results for this single threshold
        tp_precision_array[idx] = tp_precision
        fp_precision_array[idx] = fp_precision
        tp_recall_array[idx] = tp_recall

    return {
        "tp_precision": tp_precision_array,
        "fp_precision": fp_precision_array,
        "tp_recall": tp_recall_array,
        "num_gt": num_gt
    }


# Evaluate the line detection algorithm by simple mAPH
def evaluate_mAPH_simple(dataset, detector, num_intervals=1000):
    """Run the simple mAPH evaluation over an exported h5 dataset.

    dataset: mapping-like object of exported raw predictions (h5 groups).
    detector: line detector passed through to compute_mAPH_simple_TPFP.
    num_intervals: number of thresholds on the PR curve.

    Returns the final {"precision", "recall", "mAP"} dict (previously the
    result was only printed and discarded).
    """
    # Initialize the recorder here
    recorder = LineDetectResult_mAPH_simple(num_intervals=num_intervals)

    dataset_keys = list(dataset.keys())
    # Iterate through all the datapoints
    for idx, key in enumerate(dataset_keys):
        # Get and convert data.
        h5_data = dataset[key]
        data = parse_h5_data(h5_data)

        # Get the junction point predictions and convert to Nx2 format
        junc_points_pred = np.where(data["junc_pred_nms"])
        junc_points_pred = np.concatenate([junc_points_pred[0][..., None], junc_points_pred[1][..., None]], axis=-1)

        # Get the heatmap predictions
        heatmap_pred = data["heatmap_pred"]
        valid_mask = data["valid_mask"]

        # Get the groundtruth
        heatmap_gt = data["heatmap_gt"]

        # Compute the per-image counts and accumulate them
        line_res = compute_mAPH_simple_TPFP(detector, junc_points_pred, heatmap_pred, valid_mask,
                                            heatmap_gt, 0.01, num_intervals=recorder.num_intervals)
        recorder.update(line_res)

        # Print some progress info
        if (idx + 1) % 50 == 0:
            print("\t [ %d / %d ] mAP: %.5f" % (idx + 1, len(dataset_keys), recorder.compute_pr()["mAP"]))

    line_pr_results = recorder.compute_pr()
    print("\t mAP: ", line_pr_results["mAP"])
    return line_pr_results


######################
## sAP related code ##
######################
# Container for heatmap evaluation results
class LineDetectResult_sAP(object):
    """Running container of sAP-style TP/FP counts with PR-curve computation."""

    def __init__(self, num_intervals=1000):
        self.num_intervals = num_intervals
        # Precision and recall counts are tracked separately because the
        # distance tolerance is applied differently on each side.
        self.data = {
            "tp_precision": np.zeros([num_intervals, 1]),
            "fp_precision": np.zeros([num_intervals, 1]),
            "tp_recall": np.zeros([num_intervals, 1]),
            "num_points": 0,
        }

    def update(self, line_res):
        """Add one image's counts to the accumulated totals."""
        data = self.data
        data["tp_precision"] = data["tp_precision"] + line_res["tp_precision"]
        data["fp_precision"] = data["fp_precision"] + line_res["fp_precision"]
        data["tp_recall"] = data["tp_recall"] + line_res["tp_recall"]
        data["num_points"] += line_res["num_gt"]

    def compute_pr(self):
        """Compute padded precision/recall arrays and the mAP."""
        tp_p = self.data["tp_precision"][:, 0]
        fp_p = self.data["fp_precision"][:, 0]
        tp_r = self.data["tp_recall"][:, 0]

        precision = self.div0(tp_p, tp_p + fp_p)
        recall = self.div0(tp_r, self.data["num_points"])

        # Pad the curve endpoints, then make precision monotonically
        # non-increasing (running max taken from the high-recall end).
        recall = np.concatenate([[0], recall, [1]])
        precision = np.concatenate([[0], precision, [0]])
        precision = np.maximum.accumulate(precision[::-1])[::-1]

        return {"precision": precision,
                "recall": recall,
                "mAP": self.compute_mAP(precision, recall)}

    @staticmethod
    def compute_mAP(precision, recall):
        """Rectangle-rule area under the PR curve."""
        return np.sum(precision[1:] * (recall[1:] - recall[:-1]))

    @staticmethod
    def div0(a, b):
        """Element-wise a / b that maps 0/0 to 1 and x/0 (x != 0) to 0."""
        with np.errstate(divide='ignore', invalid='ignore'):
            quotient = np.true_divide(a, b)
            non_finite = ~np.isfinite(quotient)  # -inf, inf, NaN
            quotient[non_finite] = np.where(a[non_finite] == 0, 1, 0)
        return quotient


# Compute tp fp of sAP metric from single data
def compute_sAP_TPFP(line_detector, junctions_pred, heatmap_pred, junctions_gt, line_map_gt, dist_thresh=5):
    """
    line_detector: A partially configured detector module (with some hyper-parameter fixed).
    junctions_pred: Nx2 array of all predicted junctions (HW format)
    heatmap_pred: Dense prob map with same size as image.
    junctions_gt: Nx2 array of all the ground truth junctions.
    line_map_gt: NxN ground truth line connectivity map.
    dist_thresh: sAP distance threshold (in the 128x128 evaluation frame).

    Returns (tp, fp, line_scores_pred, num_gt).
    """
    # Keep the raw junctions for the thresholded detection below.
    junctions_pred_raw = junctions_pred.copy()

    # Compute the score map
    line_score_map_pred, junctions_pred = line_detector.detect_score_map(junctions_pred, heatmap_pred)

    # [Debug] pre-filter by using a higher detection threshold
    detect_thresh = 0.45
    line_detector.detect_thresh = detect_thresh
    line_map_pred, _ = line_detector.detect(junctions_pred_raw, heatmap_pred)

    # Convert junctions to the 128x128 evaluation resolution.
    H, W = heatmap_pred.shape[:2]
    H_ratio, W_ratio = H / 128., W / 128.
    junctions_pred = junctions_pred.astype(np.float32)
    junctions_pred[:, 0] = junctions_pred[:, 0] / H_ratio
    junctions_pred[:, 1] = junctions_pred[:, 1] / W_ratio
    # Work on a float copy so the caller's GT array is not mutated in place
    # (in-place division would also truncate integer GT coordinates).
    junctions_gt = junctions_gt.astype(np.float32)
    junctions_gt[:, 0] = junctions_gt[:, 0] / H_ratio
    junctions_gt[:, 1] = junctions_gt[:, 1] / W_ratio

    # Convert junctions + line maps to line segments (N*2*2 format)
    line_segments_pred, line_scores_pred = line_score_map_to_segments(junctions_pred, line_score_map_pred)
    line_segments_gt = line_map_to_segments(junctions_gt, line_map_gt)

    # Perform the evaluation (on sAP-5 first)
    num_gt = line_segments_gt.shape[0]
    tp, fp = msTPFP(line_segments_pred, line_segments_gt, threshold=dist_thresh)

    # [Debug] Plot the true-positive segments above the detection threshold
    tp_idx = np.where(tp)[0]
    tp_line_score = line_scores_pred[tp_idx]
    line_score_mask = tp_line_score > detect_thresh
    filtered_idx = tp_idx[line_score_mask]
    line_segments_filtered = line_segments_pred[filtered_idx, :, :]
    pred_line_plot_lcnn = plot_line_segments_from_segments(np.zeros([512, 512]), line_segments_filtered * 4.,
                                                           line_width=2)
    plt.imshow(pred_line_plot_lcnn)
    plt.savefig("pred_line_plot_lcnn.png")

    # [Debug] same plot for the pre-filtered (thresholded) detection
    line_segments_pred_debug = line_map_to_segments(junctions_pred, line_map_pred)
    tp_debug, fp_debug = msTPFP(line_segments_pred_debug, line_segments_gt, threshold=dist_thresh)
    # np.bool was removed in NumPy >= 1.24; the builtin bool is equivalent here.
    line_segments_ours = line_segments_pred_debug[tp_debug.astype(bool), :, :]
    pred_line_plot_ours = plot_line_segments_from_segments(np.zeros([512, 512]), line_segments_ours * 4., line_width=2)
    plt.imshow(pred_line_plot_ours)
    plt.savefig("pred_line_plot_ours.png")

    return tp, fp, line_scores_pred, num_gt


# Evaluate the line detection algorithm by sAP
def evaluate_sAP(exported_dataset, gt_dataset, detector, num_intervals=1000):
    """Compute the sAP-5 average precision over an exported prediction dataset.

    exported_dataset: h5-like mapping of exported raw predictions.
    gt_dataset: dataset exposing get_data_from_key() for the ground truth.
    detector: line detector used by compute_sAP_TPFP.
    num_intervals: unused; kept for interface compatibility.

    Also saves the PR curve to "pr_plot.png". Returns the AP value.
    """
    # Get all the dataset keys
    dataset_keys = list(exported_dataset.keys())

    # Define the distance thresh
    dist_thresh = 5

    # Iterate through all the datapoints, pooling tp/fp/scores across images
    num_gt = 0
    lcnn_tp, lcnn_fp, lcnn_scores = [], [], []
    for idx, key in enumerate(dataset_keys):
        print("Debug: ", idx)
        # Get and convert data.
        h5_data = exported_dataset[key]
        data = parse_h5_data(h5_data)

        # Get the converted data
        converted_data = convert_raw_exported_predictions(data)
        junctions_pred = converted_data["junctions_pred"]
        heatmap_pred = converted_data["heatmap_pred"]
        valid_mask = converted_data["valid_mask"]

        # Get the ground truths
        gt_data = gt_dataset.get_data_from_key(key)
        junctions_gt = gt_data["junctions"]
        line_map_gt = gt_data["line_map_pos"]

        # Compute the sAP counts for this image
        tp, fp, line_scores_pred, num_gt_ = compute_sAP_TPFP(detector,
                                                             junctions_pred, heatmap_pred,
                                                             junctions_gt, line_map_gt,
                                                             dist_thresh=dist_thresh
                                                             )
        num_gt += num_gt_
        lcnn_tp.append(tp)
        lcnn_fp.append(fp)
        lcnn_scores.append(line_scores_pred)

    # Sort all predictions by descending score, then accumulate tp/fp
    lcnn_tp = np.concatenate(lcnn_tp)
    lcnn_fp = np.concatenate(lcnn_fp)
    lcnn_scores = np.concatenate(lcnn_scores)
    lcnn_index = np.argsort(-lcnn_scores)
    lcnn_tp = np.cumsum(lcnn_tp[lcnn_index]) / num_gt
    lcnn_fp = np.cumsum(lcnn_fp[lcnn_index]) / num_gt

    # Generate the precision recall plot here
    recall = lcnn_tp
    precision = lcnn_tp / np.maximum(lcnn_tp + lcnn_fp, 1e-9)

    recall = np.concatenate(([0.0], recall, [1.0]))
    precision = np.concatenate(([0.0], precision, [0.0]))

    # Enforce a monotonically non-increasing precision curve
    for i in range(precision.size - 1, 0, -1):
        precision[i - 1] = max(precision[i - 1], precision[i])
    plt.plot(recall, precision)
    plt.savefig("pr_plot.png")

    AP = ap(lcnn_tp, lcnn_fp)

    return AP


################################
## Repeatability related code ##
################################
# Evaluate the line detection algorithm by repeatability
def evaluate_repeatability(dataset, model, line_detector, dist_tolerance_lst, split="viewpoint"):
    """
    dataset: A torch dataset object (synthetic shapes, wireframe, etc.)
    model: A torch model for the heatmap and junction prediction.
    dist_tolerance_lst: List of epsilons used to measure correct line segment matches.
    line_detector: line detector object taking heatmap and junctions as input.
    split: evaluation split name (e.g. "viewpoint", "illumination").

    Returns a dict mapping each tolerance to the list of per-image
    repeatability scores.
    """
    # Get number of repeatability evaluation datapoints
    num_datapoints = len(dataset.rep_eval_datapoints[split])
    # Get current device
    device = next(model.parameters()).device

    # Initialize the repeatability dict
    repeatability_dict = {}
    for dist in dist_tolerance_lst:
        repeatability_dict[dist] = []

    # Iterate through all the data
    for idx in range(num_datapoints):
        data = dataset.get_rep_eval_data(split, idx)
        ref_image = data["ref_image"][None, ...].to(device)
        target_image = data["target_image"][None, ...].to(device)
        H_mat = data["homo_mat"]

        # Compute the valid mask for the target image
        image_size = list(ref_image.shape[2:])
        valid_mask = compute_valid_mask(image_size, H_mat, -2)[0]

        # Predictions on ref image
        net_outputs_ref = model(ref_image)
        junc_np = convert_junc_predictions(net_outputs_ref["junctions"], 8, 0.154, 300)
        junctions = np.where(junc_np["junc_pred_nms"].squeeze())
        junctions_ref = np.concatenate([junctions[0][..., None], junctions[1][..., None]], axis=-1)

        heatmap = softmax(net_outputs_ref["heatmap"].detach(), dim=1).cpu().numpy().transpose(0, 2, 3, 1)
        heatmap_ref = heatmap[0, :, :, 1]

        line_map_ref, junctions_ref = line_detector.detect(
            junctions_ref,
            heatmap_ref,
            recover_junction=False
        )
        # Convert to line segments
        line_segments_ref = line_map_to_segments(junctions_ref, line_map_ref)

        # Predictions on the target image (junctions restricted to the valid region)
        net_outputs_target = model(target_image)
        junc_np = convert_junc_predictions(net_outputs_target["junctions"], 8, 0.154, 300)
        junctions = np.where(junc_np["junc_pred_nms"].squeeze() * valid_mask)
        junctions_target = np.concatenate([junctions[0][..., None], junctions[1][..., None]], axis=-1)

        heatmap = softmax(net_outputs_target["heatmap"].detach(), dim=1).cpu().numpy().transpose(0, 2, 3, 1)
        heatmap_target = heatmap[0, :, :, 1]
        line_map_target, junctions_target = line_detector.detect(
            junctions_target,
            heatmap_target,
            recover_junction=False
        )
        # Convert to line segments
        line_segments_target = line_map_to_segments(junctions_target, line_map_target)

        num_segments_ref = line_segments_ref.shape[0]
        num_segments_target = line_segments_target.shape[0]

        # Warp ref line segments to target:
        # HW -> xy -> homogeneous, apply H, normalize, back to HW format.
        line_ref_homo = np.concatenate([np.flip(line_segments_ref, -1), np.ones([num_segments_ref, 2, 1])], axis=-1)
        line_ref_warped = line_ref_homo.dot(H_mat.T)
        line_segments_ref_warped = np.flip(line_ref_warped[:, :, :2] / line_ref_warped[:, :, 2:], -1)

        # Filter out the out-of-boundary segments (True => keep)
        # NOTE: np.int was removed in NumPy >= 1.24; use the builtin int instead.
        boundary_mask = np.sum(np.sum((line_segments_ref_warped < 0).astype(int), axis=-1), axis=-1)
        boundary_mask += np.sum((line_segments_ref_warped[:, :, 0] >= image_size[0] - 1).astype(int), axis=-1)
        boundary_mask += np.sum((line_segments_ref_warped[:, :, 1] >= image_size[1] - 1).astype(int), axis=-1)
        boundary_mask = (boundary_mask == 0)
        line_segments_ref_warped = line_segments_ref_warped[boundary_mask, :, :]

        # Keep only segments with both endpoints inside the valid region.
        valid_region_mask1 = valid_mask[line_segments_ref_warped[:, 0, 0].astype(int),
                                       line_segments_ref_warped[:, 0, 1].astype(int)] == 1.
        valid_region_mask2 = valid_mask[line_segments_ref_warped[:, 1, 0].astype(int),
                                       line_segments_ref_warped[:, 1, 1].astype(int)] == 1.
        valid_region_mask = valid_region_mask1 * valid_region_mask2
        line_segments_ref_warped = line_segments_ref_warped[valid_region_mask, :, :]
        # Valid number of segments in ref
        num_valid_segments_ref = line_segments_ref_warped.shape[0]

        # Distance matrix: structural distance with endpoints matched either way.
        diff = (((line_segments_ref_warped[:, None, :, None] - line_segments_target[:, None]) ** 2).sum(-1)) ** 0.5
        diff = np.minimum(
            diff[:, :, 0, 0] + diff[:, :, 1, 1], diff[:, :, 0, 1] + diff[:, :, 1, 0]
        )

        # Compute reference to target correctness
        ref_target_min_dist = np.min(diff, 1)

        ref_target_correctness_lst = []
        for dist_tolerance in dist_tolerance_lst:
            ref_target_correctness = np.sum((ref_target_min_dist <= dist_tolerance).astype(int))
            ref_target_correctness_lst.append(ref_target_correctness)

        # Warp target line segments to ref (inverse homography)
        line_target_homo = np.concatenate([np.flip(line_segments_target, -1), np.ones([num_segments_target, 2, 1])],
                                          axis=-1)
        line_target_warped = line_target_homo.dot(np.linalg.inv(H_mat.T))
        line_segments_target_warped = np.flip(line_target_warped[:, :, :2] / line_target_warped[:, :, 2:], -1)

        # Compute distance matrix
        diff = (((line_segments_target_warped[:, None, :, None] - line_segments_ref[:, None]) ** 2).sum(-1)) ** 0.5
        diff = np.minimum(
            diff[:, :, 0, 0] + diff[:, :, 1, 1], diff[:, :, 0, 1] + diff[:, :, 1, 0]
        )

        # Compute target to reference correctness
        target_ref_min_dist = np.min(diff, 1)

        target_ref_correctness_lst = []
        for dist_tolerance in dist_tolerance_lst:
            target_ref_correctness = np.sum((target_ref_min_dist <= dist_tolerance).astype(int))
            target_ref_correctness_lst.append(target_ref_correctness)

        # Record the final correctness
        for i, dist in enumerate(dist_tolerance_lst):
            correctness = (ref_target_correctness_lst[i] + target_ref_correctness_lst[i]) / (
                        num_valid_segments_ref + num_segments_target)
            repeatability_dict[dist].append(correctness)

        # Print some info
        if idx % 20 == 0:
            print("Iter: ", idx)
            for dist in dist_tolerance_lst:
                print("\t Rep-%d: %f" % (dist, np.mean(repeatability_dict[dist])))

    return repeatability_dict
    # import ipdb; ipdb.set_trace()


# Evaluate the line detection algorithm by repeatability
def evaluate_repeatability_v2(
        dataset,
        line_detector,
        dist_tolerance_lst,
        distance_metric="sAP",
        split="viewpoint",
        save_visual=True,
        erode_border=False,
        erode_border_margin=2
):
    """
    dataset: A torch dataset object (synthetic shapes, wireframe, etc.)
    line_detector: line detector object taking input image 1xcxHxW tensor as input
    dist_tolerance_lst: List of epsilons used to measure correct line segment matches.
    distance_metric: Distance metric to use. Include "sAP", "orthogonal_distance", etc.
    split: evaluate on "viewpoint", "illumination", or "all" splits
    save_visual: save first several images for visualization.
    erode_border: whether to apply border erosion when computing the metrics.
    erode_border_margin: border margin (in pixels) used when erode_border is set.

    Returns (repeatability_dict, local_error_dict, runtime_lst, num_segments_lst).
    """
    # Get number of repeatability evaluation datapoints
    num_datapoints = len(dataset.rep_eval_datapoints[split])
    # Get current device
    device = line_detector.device

    # Fetch number of detect_thresh and inlier_thresh
    if isinstance(line_detector, LineDetectorOurs2):
        num_detect_thresh = line_detector.line_detector.num_detect_thresh
        num_inlier_thresh = line_detector.line_detector.num_inlier_thresh
    # For baseline methods
    else:
        num_detect_thresh = 1
        num_inlier_thresh = 1

    # Per-tolerance results: [detect_thresh, inlier_thresh, datapoint]
    repeatability_dict = {}
    for dist in dist_tolerance_lst:
        repeatability_dict[dist] = np.zeros([num_detect_thresh, num_inlier_thresh, num_datapoints],
                                            dtype=np.float32)

    # Initialize the localization error dict
    local_error_dict = {}
    for dist in dist_tolerance_lst:
        local_error_dict[dist] = np.zeros([num_detect_thresh, num_inlier_thresh, num_datapoints],
                                          dtype=np.float32)

    runtime_lst = []
    num_segments_lst = np.zeros([num_detect_thresh, num_inlier_thresh, num_datapoints], dtype=np.float32)

    # Iterate through all the data
    for idx in range(num_datapoints):
        data = dataset.get_rep_eval_data(split, idx)
        ref_image = data["ref_image"][None, ...].to(device)
        target_image = data["target_image"][None, ...].to(device)
        H_mat = data["homo_mat"]

        # Compute the valid mask for the target image
        image_size = list(ref_image.shape[2:])
        assert 1 not in image_size
        valid_mask = compute_valid_mask(image_size, H_mat, -2)

        # Predictions on ref image
        if isinstance(line_detector, LineDetectorOurs2):
            outputs_ref = line_detector(ref_image, profile=True, return_heatmap=True)
            line_segments_ref = outputs_ref["line_segments"]
            runtime_lst.append(outputs_ref["time"])
            heatmap_ref = outputs_ref["heatmap"]
        else:
            raise ValueError("Unknown line detector instance")

        # Predictions on the target image.
        if isinstance(line_detector, LineDetectorOurs2):
            outputs_target = line_detector(target_image, valid_mask, profile=True, return_heatmap=True)
            line_segments_target = outputs_target["line_segments"]
            runtime_lst.append(outputs_target["time"])
            heatmap_target = outputs_target["heatmap"]
        else:
            raise ValueError("Unknown line detector instance")

        # Compute repeatability and localization error over all combinations
        if isinstance(line_segments_ref, list):
            for detect_thresh_idx in range(num_detect_thresh):
                for inlier_thresh_idx in range(num_inlier_thresh):
                    # Fetch the corresponding entries.
                    line_segments_ref_ = line_segments_ref[detect_thresh_idx][inlier_thresh_idx]
                    line_segments_target_ = line_segments_target[detect_thresh_idx][inlier_thresh_idx]

                    rep_results, loc_results, _ = compute_metrics_v2(
                        line_segments_ref_, line_segments_target_,
                        valid_mask, H_mat, image_size, dist_tolerance_lst, distance_metric,
                        erode_border, erode_border_margin
                    )

                    # Record the results
                    for dist in dist_tolerance_lst:
                        repeatability_dict[dist][detect_thresh_idx, inlier_thresh_idx, idx] = rep_results[dist]
                        local_error_dict[dist][detect_thresh_idx, inlier_thresh_idx, idx] = loc_results[dist]
                    num_segments_ref_ = line_segments_ref_.shape[0]
                    num_segments_target_ = line_segments_target_.shape[0]
                    num_segments_lst[detect_thresh_idx, inlier_thresh_idx, idx] = \
                        (num_segments_ref_ + num_segments_target_) / 2.
        # single detect_thresh and single inlier thresh case
        else:
            detect_thresh_idx = 0
            inlier_thresh_idx = 0
            rep_results, loc_results, line_segments_filtered = compute_metrics_v2(
                line_segments_ref, line_segments_target,
                valid_mask, H_mat, image_size, dist_tolerance_lst, distance_metric,
                erode_border, erode_border_margin
            )
            line_segments_ref_valid = line_segments_filtered["line_segments_ref"]
            line_segments_target_valid = line_segments_filtered["line_segments_target"]

            # Record the results
            for dist in dist_tolerance_lst:
                repeatability_dict[dist][detect_thresh_idx, inlier_thresh_idx, idx] = rep_results[dist]
                local_error_dict[dist][detect_thresh_idx, inlier_thresh_idx, idx] = loc_results[dist]
            num_segments_ref_ = line_segments_ref.shape[0]
            num_segments_target_ = line_segments_target.shape[0]
            num_segments_lst[detect_thresh_idx, inlier_thresh_idx, idx] = \
                (num_segments_ref_ + num_segments_target_) / 2.

        # Print running averages over images processed so far.
        # Zero entries (images not yet processed) are excluded from the mean.
        if idx % 20 == 0:
            detect_thresh_center = num_detect_thresh // 2
            inlier_thresh_center = num_inlier_thresh // 2
            print("Iter: ", idx)
            for dist in dist_tolerance_lst:
                # NOTE: np.int was removed in NumPy >= 1.24; use the builtin int.
                print("\t Rep-%02d: %f \t Loc-%02d: %f" % (
                    dist, np.sum(repeatability_dict[dist][detect_thresh_center, inlier_thresh_center, :]) / \
                    np.sum(
                        (repeatability_dict[dist][detect_thresh_center, inlier_thresh_center, :] > 0.).astype(int)),
                    dist, np.sum(local_error_dict[dist][detect_thresh_center, inlier_thresh_center, :]) / \
                    np.sum(
                        (local_error_dict[dist][detect_thresh_center, inlier_thresh_center, :] > 0.).astype(int))))
            # Display runtime
            print("\t average runtime: %f s/image" % (np.mean(runtime_lst)))
            # Display average line segments
            print("\t average number of segments: %f segs/image" % (
                        np.sum(num_segments_lst[detect_thresh_center, inlier_thresh_center, :]) / \
                        np.sum((num_segments_lst[detect_thresh_center, inlier_thresh_center, :] > 0).astype(int))))

        # Save visualizations for the first images (single-threshold case only,
        # since the filtered segments are only produced in that branch).
        if save_visual and idx <= 20 and (num_detect_thresh == 1) and (num_inlier_thresh == 1):
            save_visualizations(
                idx,
                ref_image, target_image,
                line_segments_ref, line_segments_ref_valid,
                line_segments_target, line_segments_target_valid,
                heatmap_ref, heatmap_target,
                mode="heatmap"
            )

    return repeatability_dict, local_error_dict, runtime_lst, num_segments_lst


def save_visualizations(
        idx,
        ref_image,
        target_image,
        line_segments_ref,
        line_segments_ref_valid,
        line_segments_target,
        line_segments_target_valid,
        heatmap_ref,
        heatmap_target,
        mode="heatmap"
):
    """
    Save a 1x5 debug figure to ./sandbox/rep_seg_plot_<idx>.png.

    ref_image / target_image: 1xCxHxW torch tensors.
    line_segments_*: Nx2x2 arrays of segment endpoints (HW coordinates) -
        raw and validity-filtered versions for both views.
    heatmap_ref / heatmap_target: 2D heatmaps, plotted with range [0, 1].
    mode: ["heatmap", "segments"] - selects the content of the last panels.
    """
    # Stack both endpoints of every segment into a flat junction list.
    ref_junctions = np.concatenate(
        [line_segments_ref[:, 0, :], line_segments_ref[:, 1, :]],
        axis=0
    )
    target_junctions = np.concatenate(
        [line_segments_target[:, 0, :], line_segments_target[:, 1, :]],
        axis=0
    )
    line_width = 2
    junc_size = 4
    ref_image_plot = ref_image.cpu().numpy().transpose(0, 2, 3, 1).squeeze()
    target_image_plot = target_image.cpu().numpy().transpose(0, 2, 3, 1).squeeze()
    junc_plot_ref = plot_junctions(ref_image_plot, ref_junctions, junc_size=junc_size)
    # Bug fix: target junctions and filtered target segments were previously
    # drawn on the reference image; draw them on the target image instead.
    junc_plot_target = plot_junctions(target_image_plot, target_junctions, junc_size=junc_size)
    line_plot_ref = plot_line_segments_from_segments(
        ref_image_plot, line_segments_ref, line_width=line_width, junc_size=junc_size)
    line_plot_ref_filtered = plot_line_segments_from_segments(
        ref_image_plot, line_segments_ref_valid, line_width=line_width, junc_size=junc_size)
    line_plot_target = plot_line_segments_from_segments(
        target_image_plot, line_segments_target, line_width=line_width, junc_size=junc_size)
    line_plot_target_filtered = plot_line_segments_from_segments(
        target_image_plot, line_segments_target_valid, line_width=line_width, junc_size=junc_size)
    # NOTE: matplotlib.use("Agg") is already set at module import time; calling
    # it here after figures may exist has no effect, so it was removed.

    # Plot all panels into one figure.
    fig, axes = plt.subplots(1, 5, figsize=(18, 8), dpi=240)
    if mode == "segments":
        # Raw reference segments
        axes[0].imshow(line_plot_ref)
        axes[0].axis('off')
        axes[0].set_title("ref_segments")
        # Filtered reference segments
        axes[1].imshow(line_plot_ref_filtered)
        axes[1].axis('off')
        axes[1].set_title("ref_segments_filtered")
        # Raw target segments
        axes[2].imshow(line_plot_target)
        axes[2].axis('off')
        axes[2].set_title("target_segments")
        # Filtered target segments
        axes[3].imshow(line_plot_target_filtered)
        axes[3].axis('off')
        axes[3].set_title("target_segments_filtered")
        # Bug fix: the original referenced an undefined name `junc_plot`
        # here (NameError). Show the reference-view junction plot instead
        # (junc_plot_target remains available if both views are wanted).
        axes[4].imshow(junc_plot_ref)
        axes[4].axis('off')
        axes[4].set_title("junction_plots")
    else:
        # Raw reference segments
        axes[0].imshow(line_plot_ref)
        axes[0].axis('off')
        axes[0].set_title("ref_segments")
        # Filtered target segments
        axes[1].imshow(line_plot_target_filtered)
        axes[1].axis('off')
        axes[1].set_title("target_segments_filtered")
        # Raw target segments
        axes[2].imshow(line_plot_target)
        axes[2].axis('off')
        axes[2].set_title("target_segments")
        # Reference heatmap
        axes[3].imshow(heatmap_ref, vmax=1., vmin=0.)
        axes[3].axis('off')
        axes[3].set_title("heatmap_ref")
        # Target heatmap
        axes[4].imshow(heatmap_target, vmax=1., vmin=0.)
        axes[4].axis('off')
        axes[4].set_title("heatmap_target")

    os.makedirs("./sandbox", exist_ok=True)
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.02, hspace=0.0)
    plt.savefig("./sandbox/rep_seg_plot_%d.png" % (idx))

    # Close the figure to release its memory.
    plt.close(fig)


# Convert torch predictions to numpy arrays for evaluation
def convert_junc_predictions_HA(junc_prob, grid_size, detect_thresh=1 / 65, topk=300):
    """
    Apply NMS to a junction probability map from homography adaptation.

    junc_prob: junction probability map (already softmax-ed / averaged).
    grid_size: cell size used by super_nms.
    detect_thresh: detection probability threshold.
    topk: keep at most this many junctions (0 disables the limit).

    Returns a dict with "junc_pred" (the squeezed input probability map)
    and "junc_pred_nms" (the NMS'd detection map).
    """
    # super_nms expects a leading batch dimension.
    batched_prob = junc_prob[None, ...]
    nms_map = super_nms(batched_prob, grid_size, detect_thresh, topk)
    return {
        "junc_pred": batched_prob.squeeze(),
        "junc_pred_nms": nms_map,
    }


# Wrap the detection process on HA results
def line_detection_HA(line_detector, junction_prob, heatmap, device):
    """
    Run the line detector on homography-adaptation aggregated outputs.

    line_detector: detector object exposing detect_torch(junctions, heatmap, ...).
    junction_prob: aggregated junction probability map.
    heatmap: aggregated line heatmap; squeezed to HxW and thresholded in place.
    device: torch device used by the detector.

    Returns a dict with "line_segments" (an Nx2x2 array, or a nested
    [detect_thresh][inlier_thresh] list of such arrays when the detector
    sweeps multiple thresholds), the thresholded "heatmap", and the
    wall-clock "time" spent.
    """
    t_start = time.time()

    # Reduce the heatmap to HxW and suppress weak responses.
    heatmap = heatmap.squeeze()
    heatmap[heatmap < 0.1] = 0.

    # NMS on the junction probability map, then gather (row, col) coordinates.
    grid_size = 8
    junc_outputs = convert_junc_predictions_HA(
        junction_prob, grid_size, detect_thresh=1 / 65, topk=0)
    rows, cols = np.where(junc_outputs["junc_pred_nms"].squeeze())
    junctions = np.concatenate([rows[..., None], cols[..., None]], axis=-1)

    line_map, junctions = line_detector.detect_torch(
        junctions, heatmap,
        recover_junction=False,
        device=device
    )
    if isinstance(line_map, torch.Tensor):
        line_map = line_map.cpu().numpy()
    if isinstance(junctions, torch.Tensor):
        junctions = junctions.cpu().numpy()

    # A >2D line map stacks one adjacency map per (detect_thresh, inlier_thresh)
    # combination; convert each one to segments separately.
    if len(line_map.shape) > 2:
        line_segments = [
            [line_map_to_segments(junctions, line_map[d_idx, i_idx])
             for i_idx in range(line_map.shape[1])]
            for d_idx in range(line_map.shape[0])
        ]
    else:
        line_segments = line_map_to_segments(junctions, line_map)
    t_end = time.time()

    return {
        "line_segments": line_segments,
        "heatmap": heatmap,
        "time": t_end - t_start
    }


# Evaluate the line detection algorithm on homography adaptation exported dataset
def evaluate_repeatability_HA(dataset, line_detector, device, dist_tolerance_lst, save_visual=True):
    """
    Evaluate line-detection repeatability on a homography-adaptation exported dataset.

    dataset: A h5 dataset object (synthetic shapes, wireframe, etc.)
    line_detector: line detector object taking input image 1xcxHxW tensor as input
    device: The torch device to put tensors.
    dist_tolerance_lst: List of epsilons used to measure correct line segment matches.
    save_visual: save first several images for visualization.

    Returns (repeatability_dict, local_error_dict, runtime_lst, num_segments_lst),
    where the dicts map each tolerance to a
    [num_detect_thresh, num_inlier_thresh, num_datapoints] float32 array.
    """
    # Get number of repeatability evaluation datapoints
    num_datapoints = len(list(dataset.keys()))
    datapoints = sorted(list(dataset.keys()))

    # Fetch number of detect_thresh and inlier_thresh combinations to sweep.
    num_detect_thresh = line_detector.num_detect_thresh
    num_inlier_thresh = line_detector.num_inlier_thresh

    # Per-tolerance result tensors, indexed [detect_thresh, inlier_thresh, datapoint].
    repeatability_dict = {
        dist: np.zeros([num_detect_thresh, num_inlier_thresh, num_datapoints], dtype=np.float32)
        for dist in dist_tolerance_lst
    }
    local_error_dict = {
        dist: np.zeros([num_detect_thresh, num_inlier_thresh, num_datapoints], dtype=np.float32)
        for dist in dist_tolerance_lst
    }

    runtime_lst = []
    num_segments_lst = np.zeros([num_detect_thresh, num_inlier_thresh, num_datapoints], dtype=np.float32)

    # Iterate through all the data
    for idx, datapoint in enumerate(datapoints):
        data = parse_h5_data(dataset[datapoint])
        ref_image = torch.tensor(data["ref_image"][None, ...].transpose(0, 3, 1, 2))
        ref_junc_prob = data["ref_junc_prob_mean"].astype(np.float32)
        ref_heatmap_prob = data["ref_heatmap_prob_mean"].squeeze().astype(np.float32)
        target_image = torch.tensor(data["target_image"][None, ...].transpose(0, 3, 1, 2))
        target_junc_prob = data["target_junc_prob_mean"].astype(np.float32)
        target_heatmap_prob = data["target_heatmap_prob_mean"].squeeze().astype(np.float32)
        # data["homo_mat"] is 1x3x3
        H_mat = data["homo_mat"].astype(np.float32)

        # Compute the valid mask for the target image
        image_size = list(ref_image.shape[2:])
        assert not 1 in image_size
        valid_mask = compute_valid_mask(image_size, H_mat, -2)[0]

        # Predictions on ref image
        outputs_ref = line_detection_HA(
            line_detector, ref_junc_prob,
            ref_heatmap_prob, device
        )
        line_segments_ref = outputs_ref["line_segments"]
        runtime_lst.append(outputs_ref["time"])
        heatmap_ref = outputs_ref["heatmap"]

        # Predictions on the target image.
        outputs_target = line_detection_HA(
            line_detector, target_junc_prob,
            target_heatmap_prob, device
        )
        line_segments_target = outputs_target["line_segments"]
        runtime_lst.append(outputs_target["time"])
        heatmap_target = outputs_target["heatmap"]

        # Multiple detect/inlier threshold combinations => nested list of segments.
        if isinstance(line_segments_ref, list):
            for detect_thresh_idx in range(num_detect_thresh):
                for inlier_thresh_idx in range(num_inlier_thresh):
                    # Fetch the corresponding entries.
                    line_segments_ref_ = line_segments_ref[detect_thresh_idx][inlier_thresh_idx]
                    line_segments_target_ = line_segments_target[detect_thresh_idx][inlier_thresh_idx]

                    rep_results, loc_results, _, _ = compute_metrics(
                        line_segments_ref_, line_segments_target_,
                        valid_mask, H_mat, image_size, dist_tolerance_lst
                    )

                    # Record the results
                    for dist in dist_tolerance_lst:
                        repeatability_dict[dist][detect_thresh_idx, inlier_thresh_idx, idx] = rep_results[dist]
                        local_error_dict[dist][detect_thresh_idx, inlier_thresh_idx, idx] = loc_results[dist]
                    num_segments_lst[detect_thresh_idx, inlier_thresh_idx, idx] = (
                        line_segments_ref_.shape[0] + line_segments_target_.shape[0]) / 2.
        # Single detect_thresh and single inlier_thresh case.
        else:
            rep_results, loc_results, line_segments_ref_warped, line_segments_target_warped = compute_metrics(
                line_segments_ref, line_segments_target,
                valid_mask, H_mat, image_size, dist_tolerance_lst
            )
            # Record the results
            for dist in dist_tolerance_lst:
                repeatability_dict[dist][0, 0, idx] = rep_results[dist]
                local_error_dict[dist][0, 0, idx] = loc_results[dist]
            num_segments_lst[0, 0, idx] = (
                line_segments_ref.shape[0] + line_segments_target.shape[0]) / 2.

        # Periodically print running statistics.
        if idx % 20 == 0:
            _print_progress_HA(
                idx, dist_tolerance_lst, repeatability_dict, local_error_dict,
                runtime_lst, num_segments_lst,
                num_detect_thresh // 2, num_inlier_thresh // 2
            )

        # [Debug] Save visualizations for the first images. Only the non-list
        # branch above binds the warped segments, so guard on that too
        # (the original could hit a NameError here). `save_visual` was
        # previously accepted but never used; it now gates this branch.
        if save_visual and idx <= 20 and (num_detect_thresh == 1) and (num_inlier_thresh == 1) \
                and not isinstance(line_segments_ref, list):
            line_plot = plot_line_segments_from_segments(
                target_image.cpu().numpy().transpose(0, 2, 3, 1).squeeze(),
                line_segments_ref_warped, line_width=2)
            line_plot2 = plot_line_segments_from_segments(
                ref_image.cpu().numpy().transpose(0, 2, 3, 1).squeeze(),
                line_segments_target_warped, line_width=2)
            line_plot3 = plot_line_segments_from_segments(
                target_image.cpu().numpy().transpose(0, 2, 3, 1).squeeze(),
                line_segments_target, line_width=2)
            line_plot4 = plot_line_segments_from_segments(
                ref_image.cpu().numpy().transpose(0, 2, 3, 1).squeeze(),
                line_segments_ref, line_width=2)

            # Plot them into one figure.
            fig, axes = plt.subplots(1, 5, figsize=(18, 8), dpi=120)
            # ref lines in ref views
            axes[0].imshow(line_plot4)
            axes[0].axis('off')
            # target lines warped into ref views
            axes[1].imshow(line_plot2)
            axes[1].axis('off')
            # ref lines warped into target views
            axes[2].imshow(line_plot3)
            axes[2].axis('off')
            # reference heatmap
            axes[3].imshow(heatmap_ref, vmax=1., vmin=0.)
            axes[3].axis('off')
            # target heatmap
            axes[4].imshow(heatmap_target, vmax=1., vmin=0.)
            axes[4].axis('off')

            # Ensure the output folder exists (previously savefig could fail
            # if ./sandbox had not been created elsewhere).
            os.makedirs("./sandbox", exist_ok=True)
            plt.tight_layout()
            plt.subplots_adjust(wspace=0.02, hspace=0.0)
            plt.savefig("./sandbox/rep_seg_plot_HA_%d.png" % (idx))
            # Close the figure so repeated iterations do not leak memory.
            plt.close(fig)

    return repeatability_dict, local_error_dict, runtime_lst, num_segments_lst


def _print_progress_HA(idx, dist_tolerance_lst, repeatability_dict, local_error_dict,
                       runtime_lst, num_segments_lst, detect_center, inlier_center):
    """Print running averages (over nonzero entries) at the center threshold combo."""
    print("Iter: ", idx)
    for dist in dist_tolerance_lst:
        rep_slice = repeatability_dict[dist][detect_center, inlier_center, :]
        loc_slice = local_error_dict[dist][detect_center, inlier_center, :]
        # Averages are taken over processed datapoints only (nonzero entries);
        # np.int is deprecated, so plain int is used for the mask cast.
        print("\t Rep-%02d: %f \t Loc-%02d: %f" % (
            dist, np.sum(rep_slice) / np.sum((rep_slice > 0.).astype(int)),
            dist, np.sum(loc_slice) / np.sum((loc_slice > 0.).astype(int))))
    # Display runtime
    print("\t average runtime: %f s/image" % (np.mean(runtime_lst)))
    # Display average line segments
    seg_slice = num_segments_lst[detect_center, inlier_center, :]
    print("\t average number of segments: %f segs/image" % (
        np.sum(seg_slice) / np.sum((seg_slice > 0).astype(int))))


# Compute repeatability and localization error given single pair of ref & target line segments
def compute_metrics(
        line_segments_ref,
        line_segments_target,
        valid_mask,
        H_mat,
        image_size,
        dist_tolerance_lst,
        distance_metric="sAP"
):
    """
    Compute repeatability and localization error for one ref/target segment pair.

    line_segments_ref: Nx2x2 array.
    line_segments_target: Nx2x2 array.
    valid_mask: 2D mask (same size as the image)
    H_mat: the 3x3 array containing the homography matrix.
    image_size: list containing [H, W].
    dist_tolerance_lst: list of all distance tolerances of interest.
    distance_metric: "sAP" or "orthogonal_distance".

    Returns (repeatability_results, loc_error_results,
             line_segments_ref_warped, line_segments_target_warped),
    where the two result dicts map each tolerance to a scalar.

    Fixes vs. the original: removed a leftover ipdb.set_trace() breakpoint on
    the target->ref sAP path, replaced the deprecated np.int casts with int,
    and deduplicated the grouped distance computation into helpers.
    """
    if (not isinstance(line_segments_ref, np.ndarray)) or len(line_segments_ref.shape) < 3:
        raise ValueError("[Error] line_segments_ref should be an array with shape Nx2x2")
    if (not isinstance(line_segments_target, np.ndarray)) or len(line_segments_target.shape) < 3:
        raise ValueError("[Error] line_segments_target should be an array with shape Nx2x2")
    if not (len(H_mat.shape) == 2 and H_mat.shape[0] == 3 and H_mat.shape[1] == 3):
        raise ValueError("[Error] H_mat should be a 3x3 array")

    # Check the distance_metric to use
    supported_metrics = ["sAP", "orthogonal_distance"]
    if not distance_metric in supported_metrics:
        raise ValueError(f"[Error] The specified distnace metric is not in supported metrics {supported_metrics}.")

    num_segments_target = line_segments_target.shape[0]
    # Distance assigned to segments with no possible match.
    fallback_dist = dist_tolerance_lst[-1] + 100.

    # Warp ref line segments into the target frame.
    line_segments_ref_warped = _warp_segments(line_segments_ref, H_mat)

    # Filter out the out-of-boundary segments (keep only fully inside ones).
    boundary_count = np.sum(np.sum((line_segments_ref_warped < 0).astype(int), axis=-1), axis=-1)
    boundary_count += np.sum((line_segments_ref_warped[:, :, 0] >= image_size[0] - 1).astype(int), axis=-1)
    boundary_count += np.sum((line_segments_ref_warped[:, :, 1] >= image_size[1] - 1).astype(int), axis=-1)
    line_segments_ref_warped = line_segments_ref_warped[boundary_count == 0, :, :]

    # Keep only segments whose both endpoints land in the valid region.
    endpoint0_valid = valid_mask[line_segments_ref_warped[:, 0, 0].astype(int),
                                 line_segments_ref_warped[:, 0, 1].astype(int)] == 1.
    endpoint1_valid = valid_mask[line_segments_ref_warped[:, 1, 0].astype(int),
                                 line_segments_ref_warped[:, 1, 1].astype(int)] == 1.
    line_segments_ref_warped = line_segments_ref_warped[endpoint0_valid * endpoint1_valid, :, :]
    # Valid number of segments in ref
    num_valid_segments_ref = line_segments_ref_warped.shape[0]

    # ref -> target distances and match statistics.
    if distance_metric == "sAP":
        ref_target_min_dist = _sap_min_dist(line_segments_ref_warped, line_segments_target, fallback_dist)
    else:
        ref_target_min_dist = _orth_min_dist(line_segments_ref_warped, line_segments_target, fallback_dist)
    ref_target_correctness_lst, ref_target_loc_error_lst = _match_stats(
        ref_target_min_dist, dist_tolerance_lst)

    # Warp target line segments into the ref frame (inverse homography) and repeat.
    line_segments_target_warped = _warp_segments(line_segments_target, np.linalg.inv(H_mat))
    if distance_metric == "sAP":
        target_ref_min_dist = _sap_min_dist(line_segments_target_warped, line_segments_ref, fallback_dist)
    else:
        target_ref_min_dist = _orth_min_dist(line_segments_target_warped, line_segments_ref, fallback_dist)
    target_ref_correctness_lst, target_ref_loc_error_lst = _match_stats(
        target_ref_min_dist, dist_tolerance_lst)

    # Record the final correctness
    repeatability_results = {}
    loc_error_results = {}
    for i, dist in enumerate(dist_tolerance_lst):
        # Repeatability: matched fraction over all valid segments in both views.
        denom = num_valid_segments_ref + num_segments_target
        if denom == 0:
            correctness = 0
        else:
            correctness = (ref_target_correctness_lst[i] + target_ref_correctness_lst[i]) / denom
            if np.isnan(correctness) or np.isinf(correctness):
                correctness = 0
        repeatability_results[dist] = correctness

        # Localization error: mean distance over matched segments (0 if none).
        loc_error_arr = np.concatenate([ref_target_loc_error_lst[i],
                                        target_ref_loc_error_lst[i]])
        if 0 in loc_error_arr.shape:
            loc_error_results[dist] = 0
        else:
            loc_error_results[dist] = np.mean(loc_error_arr)

    return repeatability_results, loc_error_results, line_segments_ref_warped, line_segments_target_warped


def _warp_segments(line_segments, H_mat):
    """Warp Nx2x2 segments (HW coords) by homography H_mat; returns Nx2x2 HW coords."""
    num_segments = line_segments.shape[0]
    # HW -> xy -> homogeneous, warp, normalize, then back to HW.
    segments_homo = np.concatenate(
        [np.flip(line_segments, -1), np.ones([num_segments, 2, 1])], axis=-1)
    segments_warped = segments_homo.dot(H_mat.T)
    return np.flip(segments_warped[:, :, :2] / segments_warped[:, :, 2:], -1)


def _sap_min_dist(src_segments, dst_segments, fallback_dist):
    """
    Per-source-segment minimum structural (sAP) distance to any destination segment.
    Sources are processed in groups of 1000 to bound the memory of the pairwise
    distance tensor. Sources with no destination get fallback_dist.
    """
    group_num = 1000
    num_src = src_segments.shape[0]
    num_iter = max(1, math.ceil(num_src / group_num))
    min_dist_chunks = []
    for iter_idx in range(num_iter):
        chunk = src_segments[iter_idx * group_num:(iter_idx + 1) * group_num]
        # Endpoint-to-endpoint distances; take the better of the two endpoint
        # pairings (direct vs. swapped).
        diff = (((chunk[:, None, :, None] - dst_segments[:, None]) ** 2).sum(-1)) ** 0.5
        diff = np.minimum(
            diff[:, :, 0, 0] + diff[:, :, 1, 1], diff[:, :, 0, 1] + diff[:, :, 1, 0]
        )
        if diff.shape[1] == 0:
            # No destination segments: every source segment is unmatched.
            min_dist_chunks.append(np.ones([diff.shape[0], 1]) * fallback_dist)
        else:
            min_dist_chunks.append(np.min(diff, 1))
    if len(min_dist_chunks) == 1:
        return min_dist_chunks[0]
    return np.concatenate(min_dist_chunks)


def _orth_min_dist(src_segments, dst_segments, fallback_dist):
    """Per-source minimum overlap-aware orthogonal distance to any destination."""
    diff = get_overlap_orth_line_dist(
        src_segments,
        dst_segments,
        min_overlap=0.5
    )
    try:
        return np.min(diff, 1)
    except ValueError:
        # diff is empty (no destination segments).
        return np.ones([diff.shape[0], 1]) * fallback_dist


def _match_stats(min_dist, dist_tolerance_lst):
    """Per-tolerance match counts and the matched segments' distances."""
    correctness_lst = []
    loc_error_lst = []
    for dist_tolerance in dist_tolerance_lst:
        correct_mask = min_dist <= dist_tolerance
        correctness_lst.append(np.sum(correct_mask.astype(int)))
        loc_error_lst.append(min_dist[correct_mask])
    return correctness_lst, loc_error_lst


# Compute repeatability and localization error given single pair of ref & target line segments
def compute_metrics_v2(
        line_segments_ref,
        line_segments_target,
        valid_mask,
        H_mat,
        image_size,
        dist_tolerance_lst,
        distance_metric="sAP",
        erode_border=False,
        erode_border_margin=2,
):
    """
    Compute repeatability and localization error for one ref/target pair.

    line_segments_ref: Nx2x2 array (HW endpoint convention).
    line_segments_target: Nx2x2 array (HW endpoint convention).
    valid_mask: 2D mask (same size as the image) of valid target pixels.
    H_mat: the 3x3 array containing the homography matrix (ref -> target, xy convention).
    image_size: list containing [H, W].
    dist_tolerance_lst: list of all distance tolerances of interest.
    distance_metric: "sAP", "sAP_square", or "orthogonal_distance".
    erode_border: if True, drop segments whose endpoints fall in the eroded border margin.
    erode_border_margin: border margin width in erosion iterations (pixels).

    Returns:
        repeatability_results: dict mapping tolerance -> repeatability score.
        loc_error_results: dict mapping tolerance -> mean localization error
            (computed over target->ref matches only).
        line_segments_filtered: dict with the filtered ref and target segments.
    """
    # Verify the shapes
    if (not isinstance(line_segments_ref, np.ndarray)) or len(line_segments_ref.shape) < 3:
        raise ValueError("[Error] line_segments_ref should be an array with shape Nx2x2")
    if (not isinstance(line_segments_target, np.ndarray)) or len(line_segments_target.shape) < 3:
        raise ValueError("[Error] line_segments_target should be an array with shape Nx2x2")
    if not (len(H_mat.shape) == 2 and H_mat.shape[0] == 3 and H_mat.shape[1] == 3):
        raise ValueError("[Error] H_mat should be a 3x3 array")

    # Check the distance_metric to use
    supported_metrics = ["sAP", "orthogonal_distance", "sAP_square"]
    if distance_metric not in supported_metrics:
        raise ValueError(f"[Error] The specified distance metric is not in supported metrics {supported_metrics}.")

    # Exclude the segments with endpoints in the boundary margin at the beginning
    # NOTE: np.float / np.int were removed in NumPy 1.24 => use the builtin
    # float / int instead (identical semantics; they were plain aliases).
    if erode_border:
        # Compute the eroded valid masks (ref + target)
        if erode_border_margin < 1:
            raise ValueError("[Error] The erosion margin must be >= 1")
        ref_valid_mask = np.ones(image_size, dtype=float)
        ref_valid_mask = binary_erosion(ref_valid_mask, iterations=erode_border_margin).astype(float)
        target_valid_mask = binary_erosion(valid_mask, iterations=erode_border_margin).astype(float)

        # Exclude segments whose endpoints fall outside the eroded masks
        ref_valid_region_mask1 = ref_valid_mask[
            line_segments_ref[:, 0, 0].astype(int), line_segments_ref[:, 0, 1].astype(int)] == 1.
        ref_valid_region_mask2 = ref_valid_mask[
            line_segments_ref[:, 1, 0].astype(int), line_segments_ref[:, 1, 1].astype(int)] == 1.
        ref_valid_region_mask = ref_valid_region_mask1 * ref_valid_region_mask2
        line_segments_ref = line_segments_ref[ref_valid_region_mask, :]
        target_valid_region_mask1 = target_valid_mask[
            line_segments_target[:, 0, 0].astype(int), line_segments_target[:, 0, 1].astype(int)] == 1.
        target_valid_region_mask2 = target_valid_mask[
            line_segments_target[:, 1, 0].astype(int), line_segments_target[:, 1, 1].astype(int)] == 1.
        target_valid_region_mask = target_valid_region_mask1 * target_valid_region_mask2
        line_segments_target = line_segments_target[target_valid_region_mask, :]
    else:
        ref_valid_mask = np.ones(image_size, dtype=float)
        target_valid_mask = valid_mask

    # Exclude the target segments with endpoints in the clip border
    target_clip_valid_mask = np.ones(image_size, dtype=float)
    if erode_border_margin > 0:
        target_clip_valid_mask = binary_erosion(target_clip_valid_mask, iterations=erode_border_margin).astype(float)
    target_valid_mask = target_valid_mask * target_clip_valid_mask
    # Clip endpoint indices to the actual image bounds (the previous version
    # hard-coded 511, which is only correct for 512x512 images).
    max_row, max_col = image_size[0] - 1, image_size[1] - 1
    target_valid_region_mask1 = target_valid_mask[
        np.clip(line_segments_target[:, 0, 0].astype(int), 0, max_row),
        np.clip(line_segments_target[:, 0, 1].astype(int), 0, max_col)] == 1.
    target_valid_region_mask2 = target_valid_mask[
        np.clip(line_segments_target[:, 1, 0].astype(int), 0, max_row),
        np.clip(line_segments_target[:, 1, 1].astype(int), 0, max_col)] == 1.
    target_valid_region_mask = target_valid_region_mask1 * target_valid_region_mask2
    line_segments_target = line_segments_target[target_valid_region_mask, :]

    # Compute repeatability
    num_segments_ref = line_segments_ref.shape[0]
    num_segments_target = line_segments_target.shape[0]

    # Warp ref line segments to target
    # Convert to xy format => homogeneous
    line_ref_homo = np.concatenate([np.flip(line_segments_ref, -1), np.ones([num_segments_ref, 2, 1])], axis=-1)
    line_ref_warped = line_ref_homo.dot(H_mat.T)
    # Normalize => back to HW format
    line_segments_ref_warped = np.flip(line_ref_warped[:, :, :2] / line_ref_warped[:, :, 2:], -1)

    # Filter out the out-of-border segments in target view (True => keep)
    boundary_mask = np.sum(np.sum((line_segments_ref_warped < 0).astype(int), axis=-1), axis=-1)
    boundary_mask += np.sum((line_segments_ref_warped[:, :, 0] >= image_size[0] - 1).astype(int), axis=-1)
    boundary_mask += np.sum((line_segments_ref_warped[:, :, 1] >= image_size[1] - 1).astype(int), axis=-1)
    boundary_mask = (boundary_mask == 0)
    line_segments_ref_warped = line_segments_ref_warped[boundary_mask, :]
    # Filter out the out of valid_mask segments in target view (True => keep)
    valid_region_mask1 = target_valid_mask[
        line_segments_ref_warped[:, 0, 0].astype(int), line_segments_ref_warped[:, 0, 1].astype(int)] == 1.
    valid_region_mask2 = target_valid_mask[
        line_segments_ref_warped[:, 1, 0].astype(int), line_segments_ref_warped[:, 1, 1].astype(int)] == 1.
    valid_region_mask = valid_region_mask1 * valid_region_mask2
    line_segments_ref_warped = line_segments_ref_warped[valid_region_mask, :]
    # Perform the filtering on original segments (2 stage)
    line_segments_ref_valid = line_segments_ref[boundary_mask, :]
    line_segments_ref_valid = line_segments_ref_valid[valid_region_mask, :, :]
    # Valid number of segments in ref
    num_valid_segments_ref = line_segments_ref_valid.shape[0]

    # Warp target line segments to ref
    line_target_homo = np.concatenate([np.flip(line_segments_target, -1), np.ones([num_segments_target, 2, 1])],
                                      axis=-1)
    line_target_warped = line_target_homo.dot(np.linalg.inv(H_mat.T))
    line_segments_target_warped = np.flip(line_target_warped[:, :, :2] / line_target_warped[:, :, 2:], -1)
    # Filter out the out-of-border segments in ref view (True => keep)
    boundary_mask = np.sum(np.sum((line_segments_target_warped < 0).astype(int), axis=-1), axis=-1)
    boundary_mask += np.sum((line_segments_target_warped[:, :, 0] >= image_size[0] - 1).astype(int), axis=-1)
    boundary_mask += np.sum((line_segments_target_warped[:, :, 1] >= image_size[1] - 1).astype(int), axis=-1)
    boundary_mask = (boundary_mask == 0)
    line_segments_target_warped = line_segments_target_warped[boundary_mask, :]
    # Filter out the out of valid_mask segments in ref view (True => keep)
    valid_region_mask1 = ref_valid_mask[
        line_segments_target_warped[:, 0, 0].astype(int), line_segments_target_warped[:, 0, 1].astype(int)] == 1.
    valid_region_mask2 = ref_valid_mask[
        line_segments_target_warped[:, 1, 0].astype(int), line_segments_target_warped[:, 1, 1].astype(int)] == 1.
    valid_region_mask = valid_region_mask1 * valid_region_mask2
    line_segments_target_warped = line_segments_target_warped[valid_region_mask, :]
    # Directly assign (target segments are compared in ref coordinates)
    line_segments_target_valid = line_segments_target_warped
    # Valid number of segments in target
    num_valid_segments_target = line_segments_target_valid.shape[0]

    # Compute closest segments in target segments for each ref segment.
    ref_target_min_dist = compute_distances(
        line_segments_ref_valid, line_segments_target_valid,
        dist_tolerance_lst, distance_metric,
        group_num=1000
    )

    ref_target_correctness_lst = []
    ref_target_loc_error_lst = []
    for dist_tolerance in dist_tolerance_lst:
        # Compute the correctness for repeatability
        ref_correct_mask = ref_target_min_dist <= dist_tolerance
        ref_target_correctness = np.sum((ref_correct_mask).astype(int))
        ref_target_correctness_lst.append(ref_target_correctness)

        # Compute the localization error
        ref_target_loc_error = ref_target_min_dist[ref_correct_mask]
        ref_target_loc_error_lst.append(ref_target_loc_error)

    # Compute closest segments in ref segments for each target segment.
    target_ref_min_dist = compute_distances(
        line_segments_target_valid, line_segments_ref_valid,
        dist_tolerance_lst, distance_metric,
        group_num=1000
    )

    target_ref_correctness_lst = []
    target_ref_loc_error_lst = []
    for dist_tolerance in dist_tolerance_lst:
        # Compute the correctness for repeatability
        target_correct_mask = target_ref_min_dist <= dist_tolerance
        target_ref_correctness = np.sum((target_correct_mask).astype(int))
        target_ref_correctness_lst.append(target_ref_correctness)

        # Compute the localization error
        target_ref_loc_error = target_ref_min_dist[target_correct_mask]
        target_ref_loc_error_lst.append(target_ref_loc_error)

    # Record the final correctness
    repeatability_results = {}
    loc_error_results = {}
    for i, dist in enumerate(dist_tolerance_lst):
        # Compute the final repeatability; guard the empty case explicitly
        # instead of relying on NumPy's inf/nan division behavior.
        denom = num_valid_segments_ref + num_valid_segments_target
        if denom == 0:
            correctness = 0
        else:
            correctness = (ref_target_correctness_lst[i] + target_ref_correctness_lst[i]) / denom
        repeatability_results[dist] = correctness

        # Compute the final localization error.
        # Only computed over target segments (target->ref matches).
        loc_error_lst = target_ref_loc_error_lst[i]
        if loc_error_lst.size == 0:
            loc_error = 0
        else:
            loc_error = np.mean(loc_error_lst)
        loc_error_results[dist] = loc_error

    # Return the processed segments
    line_segments_filtered = {
        "line_segments_ref": line_segments_ref_valid,
        "line_segments_target": line_segments_target_valid
    }

    return repeatability_results, loc_error_results, line_segments_filtered


# Find the closest segment in line_segments_cand for "each" segment in line_segments_anchor
def compute_distances(line_segments_anchor, line_segments_cand, dist_tolerance_lst, distance_metric="sAP",
                      group_num=1000):
    """
    For every anchor segment, compute the distance to its closest candidate.

    Args:
        line_segments_anchor: Nx2x2 array of anchor segments (HW endpoints).
        line_segments_cand: Mx2x2 array of candidate segments (HW endpoints).
        dist_tolerance_lst: distance tolerances; the last entry (+100) is used
            as the fill distance when there is no candidate to match against.
        distance_metric: "sAP" (endpoint L2), "sAP_square" (squared endpoint
            L2), or "orthogonal_distance" (overlap-aware orthogonal distance).
        group_num: chunk size over the anchors, bounding the size of the
            pairwise distance tensor.

    Returns:
        1D array of length N with the minimum distance per anchor (an [N, 1]
        fill-valued array when the candidate set is empty).

    Raises:
        ValueError: if distance_metric is not supported.
    """
    if distance_metric not in ["sAP", "sAP_square", "orthogonal_distance"]:
        raise ValueError("[Error] The specified distance metric is not supported.")

    # Distance assigned to anchors that have no candidate to match against.
    no_match_dist = dist_tolerance_lst[-1] + 100.

    # Compute distance matrix
    if distance_metric in ("sAP", "sAP_square"):
        num_anchor_seg = line_segments_anchor.shape[0]
        # Process the anchors chunk by chunk to keep peak memory bounded.
        num_iter = max(1, math.ceil(num_anchor_seg / group_num))
        min_dist_lst = []
        for iter_idx in range(num_iter):
            # NumPy slicing clamps out-of-range stops, so the last (possibly
            # short) chunk needs no special-casing.
            chunk = line_segments_anchor[iter_idx * group_num:(iter_idx + 1) * group_num]
            # Pairwise per-endpoint squared distances: [chunk, M, 2, 2]
            diff = ((chunk[:, None, :, None] - line_segments_cand[None, :, None]) ** 2).sum(-1)
            if distance_metric == "sAP":
                diff = diff ** 0.5
            # Structural distance: the better of the two endpoint pairings.
            diff = np.minimum(
                diff[:, :, 0, 0] + diff[:, :, 1, 1], diff[:, :, 0, 1] + diff[:, :, 1, 0]
            )
            # Closest candidate per anchor in this chunk
            try:
                chunk_min_dist = np.min(diff, 1)
            except ValueError:
                # diff is empty along the candidate axis => no match possible
                chunk_min_dist = np.ones([diff.shape[0], 1]) * no_match_dist
            min_dist_lst.append(chunk_min_dist)
        anchor_cand_min_dist = np.concatenate(min_dist_lst)

    else:  # orthogonal_distance
        if 0 in line_segments_anchor.shape or 0 in line_segments_cand.shape:
            # Degenerate inputs: fabricate a distance matrix of misses.
            if 0 in line_segments_cand.shape:
                diff = np.ones([line_segments_anchor.shape[0], 1]) * no_match_dist
            else:
                diff = np.ones([1, 1]) * no_match_dist
        else:
            diff = get_overlap_orth_line_dist(
                line_segments_anchor,
                line_segments_cand,
                min_overlap=0.5
            )

        # Closest candidate per anchor
        try:
            anchor_cand_min_dist = np.min(diff, 1)
        except ValueError:
            # diff is empty along the candidate axis => no match possible
            anchor_cand_min_dist = np.ones([diff.shape[0], 1]) * no_match_dist

    return anchor_cand_min_dist