"""
Implementation of all line detection methods
(Only the line detection method using heatmaps and junctions as inputs)
"""
import numpy as np
import cv2 as cv2
from skimage.transform import hough_line, hough_line_peaks, probabilistic_hough_line
import copy
# from model.model_old import line_detection_module
import sys

sys.path.append("../")
# from model.line_detection_module import detect_ver2
import time
import torch
import math


# Function version of the simple line detection method.
def simple_line_detection(junctions, heatmap, detect_thresh=0.4):
    """Detect lines by thresholding the mean heatmap activation between junction pairs.

    junctions: N*2 array of junction coordinates in HW (row, col) format.
    heatmap: H*W line probability map.
    detect_thresh: minimum mean activation along a candidate segment.
    Returns an N*N symmetric 0/1 adjacency (line) map.
    """
    # Initialize empty line map
    num_junctions = junctions.shape[0]
    line_map_pred = np.zeros([num_junctions, num_junctions])

    # Define some hyperparameter
    mask_width = 2
    detect_threshold = detect_thresh

    # Iterate through all the junction combinations (strict upper triangle)
    for i in range(num_junctions):
        for j in range(i + 1, num_junctions):
            # Fetch start and end point
            start_point = junctions[i, :]  # in HW format
            end_point = junctions[j, :]  # in HW format

            # Rasterize the candidate segment into a boolean mask.
            # cv2.line expects (x, y) points, hence the HW -> WH flip.
            # NOTE: np.bool was removed in NumPy 1.24; use the builtin bool.
            mask = np.zeros(heatmap.shape)
            mask = cv2.line(mask,
                            tuple(np.flip(start_point)), tuple(np.flip(end_point)),
                            1., mask_width).astype(bool)

            # Get the average activation along the segment
            activation = np.mean(heatmap[mask])

            # Keep the segment if the mean activation passes the threshold
            if activation > detect_threshold:
                line_map_pred[i, j] = 1
                line_map_pred[j, i] = 1

    return line_map_pred


# Class version of the simple line detection method.
class SimpleLineDetector(object):
    """Detect lines by thresholding the mean heatmap activation between junction pairs."""

    def __init__(self, detection_thresh, mask_width=2, verbose=False):
        # Minimum mean heatmap activation for a pair to count as a line.
        self.detection_thresh = detection_thresh
        # Pixel width of the rasterized segment mask.
        self.mask_width = mask_width
        # If True, print the number of evaluated junction pairs.
        self.verbose = verbose

    def forward(self, junctions, heatmap, recover_junction=False):
        """Compute the junction adjacency (line) map.

        junctions: N*2 array in HW (row, col) format.
        heatmap: H*W line probability map.
        recover_junction: if True, also detect junctions on the image boundary
            and include them as candidates; the extended junction set is then
            returned alongside the line map.
        Returns line_map_pred, or (line_map_pred, junctions) when
        recover_junction is True.
        """
        # Recover boundary junctions FIRST so that the line map is sized for
        # (and the candidate loop covers) the full junction set. The previous
        # ordering sized the map before the concatenation, so recovered
        # junctions were never paired and the returned map/junction sizes
        # were inconsistent.
        if recover_junction:
            junctions_bound = self.detect_junctions_on_boundary(heatmap)
            junctions = np.concatenate([junctions, junctions_bound], axis=0)

        # Initialize empty line map
        num_junctions = junctions.shape[0]
        line_map_pred = np.zeros([num_junctions, num_junctions])

        num_op = 0
        # Iterate through all the junction combinations (strict upper triangle)
        for i in range(num_junctions):
            for j in range(i + 1, num_junctions):
                # Fetch start and end point
                start_point = junctions[i, :]  # in HW format
                end_point = junctions[j, :]  # in HW format

                # Rasterize the candidate segment into a boolean mask.
                # cv2.line expects (x, y) points, hence the HW -> WH flip.
                # NOTE: np.bool was removed in NumPy 1.24; use builtin bool.
                mask = np.zeros(heatmap.shape)
                mask = cv2.line(mask,
                                tuple(np.flip(start_point)),
                                tuple(np.flip(end_point)),
                                1., self.mask_width).astype(bool)

                # Mean activation along the segment
                activation = np.mean(heatmap[mask])

                # Keep the pair if the activation passes the threshold
                if activation > self.detection_thresh:
                    line_map_pred[i, j] = 1
                    line_map_pred[j, i] = 1

                num_op += 1

        if self.verbose:
            print("total steps: ", num_op)

        if recover_junction:
            return line_map_pred, junctions
        else:
            return line_map_pred

    # Recover junctions from the boundary
    def detect_junctions_on_boundary(self, heatmap, band_width=1,
                                     low_thresh=0.15, high_thresh=0.2,
                                     grid_size=8):
        """
        heatmap: H*W*1 or H*W prob map
        band_width: width of the band on the boundary to consider (now only 1 is supported)
        low_thresh: directly discard all the pixels below this threshold
        high_thresh: a pixel need to have prob higher than this to be considered as a junction to keep.
        grid_size: the window width of the NMS
        Returns an M*2 array of recovered boundary junctions in HW format.
        """
        if not band_width == 1:
            raise ValueError("[Error] Currently we only support band_width=1 version.")

        if len(heatmap.shape) == 3:
            heatmap = heatmap.squeeze()

        # Get the width and height from the input heatmap
        height, width = heatmap.shape

        # Build an index map so band pixels can be traced back to (h, w) coords
        H_vec = np.arange(0, height, 1)
        W_vec = np.arange(0, width, 1)
        H_mat, W_mat = np.meshgrid(H_vec, W_vec, indexing="ij")
        index_map = np.concatenate([H_mat[..., None], W_mat[..., None]], axis=-1)

        # Crop out the boundary of the heatmap (clockwise, corners not repeated)
        # now just use band_width=1
        top_band = heatmap[0, :width - 1]
        top_index = index_map[0, :width - 1, :]

        right_band = heatmap[:height - 1, width - 1]
        right_index = index_map[:height - 1, width - 1, :]

        bot_band = heatmap[height - 1, -1:0:-1]
        bot_index = index_map[height - 1, -1:0:-1, :]

        left_band = heatmap[-1:0:-1, 0]
        left_index = index_map[-1:0:-1, 0, :]

        # Concatenate to 1D vector
        full_band = np.concatenate([top_band, right_band, bot_band, left_band])
        full_index = np.concatenate([top_index, right_index, bot_index, left_index], axis=0)

        # Filter out some impossible cases
        full_band[full_band < low_thresh] = 0.

        # Box width
        box_size = grid_size

        # Convert each band pixel to a 1D interval for soft-NMS overlap tests
        start_points = np.clip(np.where(full_band > -1)[0] - box_size, 0, full_band.shape[0])
        end_points = np.clip(np.where(full_band > -1)[0] + box_size, 0, full_band.shape[0])

        # Pre-compute the area (interval length)
        areas = np.maximum(0.0, end_points - start_points)

        # Work on a copy so the band scores themselves are not destroyed
        score_tmp = full_band.copy()

        # Soft-NMS: repeatedly take the best-scoring pixel and down-weight its
        # neighbours proportionally to the interval overlap (IoU).
        nms_lst = []
        while score_tmp.max() >= high_thresh:
            candidates = np.argsort(score_tmp)[::-1]

            # Compute overlap with the current best candidate
            starts = np.maximum(start_points[candidates[0]], start_points[candidates[1:]])
            ends = np.minimum(end_points[candidates[0]], end_points[candidates[1:]])

            intersects = np.maximum(0.0, ends - starts + 1)
            unions = areas[candidates[0]] + areas[candidates[1:]] - intersects

            # Suppress the scores by overlapping ratio
            weight_vec = np.ones(candidates.shape)
            weight_vec[1:] = weight_vec[1:] - intersects / unions
            score_tmp[candidates[1:]] = weight_vec[1:] * score_tmp[candidates[1:]]

            nms_lst.append(candidates[0])
            score_tmp[candidates[0]] = 0.

        # Map the kept band positions back to (h, w) junction coordinates
        filtered_junctions = full_index[nms_lst, :]

        return filtered_junctions


# The simple line detector using LCNN sample method
class SimpleLineDetector_LCNN(object):
    """Line detector that scores junction pairs by bilinearly sampling the
    heatmap along each candidate segment (the L-CNN sampling scheme)."""

    def __init__(self, detect_thresh, num_samples=64, verbose=False,
                 low_thresh=0.15, high_thresh=0.2):
        # Line detection parameters
        self.detect_thresh = detect_thresh  # min mean sampled activation
        self.num_samples = num_samples  # samples per candidate segment
        self.verbose = verbose

        # Detecting junctions on the boundary parameters
        self.low_thresh = low_thresh
        self.high_thresh = high_thresh

        # Pre-compute the linspace sampler (interpolation ratios in [0, 1])
        self.sampler = np.linspace(0, 1, self.num_samples)

    def detect(self, junctions, heatmap, recover_junction=True, filter_heatmap=False):
        """Detect lines between junction pairs.

        junctions: N*2 array in HW (row, col) format.
        heatmap: H*W line probability map.
        recover_junction: if True, add junctions detected on the image boundary.
        filter_heatmap: if True, zero-out pixels with prob <= 0.1.
        Returns (line_map_pred, junctions) where line_map_pred is a symmetric
        0/1 adjacency map over the (possibly extended) junction set.
        """
        # Optionally filter the heatmap (NOTE: modifies the caller's array in place).
        if heatmap.min() < 0.1 and filter_heatmap:
            heatmap[heatmap <= 0.1] = 0

        # If recover junctions on the boundary
        if recover_junction:
            junctions_bound = self.detect_junctions_on_boundary(heatmap,
                                                                low_thresh=self.low_thresh,
                                                                high_thresh=self.high_thresh
                                                                )
            junctions = np.concatenate([junctions, junctions_bound], axis=0)

        # Initialize empty line map
        num_junctions = junctions.shape[0]
        line_map_pred = np.zeros([num_junctions, num_junctions])

        # Score every candidate pair by sampling the heatmap along the segment
        candidate_index_map, scores = self._candidate_pair_scores(junctions, heatmap)

        # Perform the simple threshold detection on the mean sampled activation
        detection_results = scores > self.detect_thresh

        # Convert detection results back to a symmetric line map
        detected_junc_indexes = candidate_index_map[detection_results, :]
        line_map_pred[detected_junc_indexes[:, 0], detected_junc_indexes[:, 1]] = 1
        line_map_pred[detected_junc_indexes[:, 1], detected_junc_indexes[:, 0]] = 1

        return line_map_pred, junctions

    # Detect the complete score map for all the line segment candidates
    def detect_score_map(self, junctions, heatmap, recover_junction=True):
        """Like detect(), but returns the raw mean sampled score of every
        candidate pair (symmetric float map) instead of a thresholded 0/1 map."""
        # If recover junctions on the boundary
        if recover_junction:
            junctions_bound = self.detect_junctions_on_boundary(heatmap,
                                                                low_thresh=self.low_thresh,
                                                                high_thresh=self.high_thresh
                                                                )
            junctions = np.concatenate([junctions, junctions_bound], axis=0)

        # Initialize empty line score map
        num_junctions = junctions.shape[0]
        line_score_map_pred = np.zeros([num_junctions, num_junctions], dtype=np.float32)

        # Score every candidate pair by sampling the heatmap along the segment
        candidate_index_map, score_map = self._candidate_pair_scores(junctions, heatmap)

        # Keep only strictly positive scores
        detection_index_map = score_map > 0.

        # Write the scores back symmetrically
        detected_junc_indexes = candidate_index_map[detection_index_map, :]
        line_score_map_pred[detected_junc_indexes[:, 0], detected_junc_indexes[:, 1]] = score_map[detection_index_map]
        line_score_map_pred[detected_junc_indexes[:, 1], detected_junc_indexes[:, 0]] = score_map[detection_index_map]

        return line_score_map_pred, junctions

    # Shared sampling helper for detect() and detect_score_map().
    def _candidate_pair_scores(self, junctions, heatmap):
        """Return (candidate_index_map, scores).

        candidate_index_map: K*2 array of (i, j) junction index pairs taken
            from the strict upper triangle (i < j).
        scores: length-K array, mean bilinearly-sampled heatmap activation
            along each candidate segment.
        """
        num_junctions = junctions.shape[0]

        # Candidate pairs: strict upper triangle of the adjacency matrix
        candidate_map = np.triu(np.ones([num_junctions, num_junctions], dtype=np.int32)) - np.eye(num_junctions)
        candidate_index_map = np.where(candidate_map)
        candidate_index_map = np.concatenate([candidate_index_map[0][..., None],
                                              candidate_index_map[1][..., None]], axis=-1)

        # Start / end coordinates of every candidate pair (HW format)
        candidate_junc_start = junctions[candidate_index_map[:, 0], :]
        candidate_junc_end = junctions[candidate_index_map[:, 1], :]

        # num_samples points interpolated between the two endpoints
        cand_samples_h = (candidate_junc_start[:, 0:1] * self.sampler[None, ...]
                          + candidate_junc_end[:, 0:1] * (1 - self.sampler)[None, ...])
        cand_samples_w = (candidate_junc_start[:, 1:2] * self.sampler[None, ...]
                          + candidate_junc_end[:, 1:2] * (1 - self.sampler)[None, ...])

        # Clip to image boundary.
        # NOTE(review): sampling assumes a 2D H*W heatmap; a trailing channel
        # dim would make the sampled features 3D — confirm callers pass H*W.
        if len(heatmap.shape) > 2:
            H, W, _ = heatmap.shape
        else:
            H, W = heatmap.shape
        cand_h = np.clip(cand_samples_h, 0, H - 1)
        cand_w = np.clip(cand_samples_w, 0, W - 1)

        # Bilinear supports: floor and floor+1 (clipped). The previous
        # floor/ceil formulation made all four weights vanish at exact integer
        # coordinates (floor == ceil), so on-pixel samples incorrectly read 0
        # and axis-aligned segments between integer junctions were suppressed.
        cand_h_floor = np.floor(cand_h).astype(np.int32)
        cand_w_floor = np.floor(cand_w).astype(np.int32)
        cand_h_next = np.clip(cand_h_floor + 1, 0, H - 1)
        cand_w_next = np.clip(cand_w_floor + 1, 0, W - 1)
        frac_h = cand_h - cand_h_floor
        frac_w = cand_w - cand_w_floor

        # Perform the bilinear sampling
        cand_samples_feat = (
                heatmap[cand_h_floor, cand_w_floor] * (1 - frac_h) * (1 - frac_w) +
                heatmap[cand_h_floor, cand_w_next] * (1 - frac_h) * frac_w +
                heatmap[cand_h_next, cand_w_floor] * frac_h * (1 - frac_w) +
                heatmap[cand_h_next, cand_w_next] * frac_h * frac_w
        )

        # Mean activation along each candidate segment
        return candidate_index_map, np.mean(cand_samples_feat, axis=-1)

    # Recover junctions from the boundary
    def detect_junctions_on_boundary(self, heatmap, band_width=1,
                                     low_thresh=0.15, high_thresh=0.2,
                                     grid_size=8):
        """
        heatmap: H*W*1 or H*W prob map
        band_width: width of the band on the boundary to consider (now only 1 is supported)
        low_thresh: directly discard all the pixels below this threshold
        high_thresh: a pixel need to have prob higher than this to be considered as a junction to keep.
        grid_size: the window width of the NMS
        Returns an M*2 array of recovered boundary junctions in HW format.
        """
        if not band_width == 1:
            raise ValueError("[Error] Currently we only support band_width=1 version.")

        if len(heatmap.shape) == 3:
            heatmap = heatmap.squeeze()

        # Get the width and height from the input heatmap
        height, width = heatmap.shape

        # Build an index map so band pixels can be traced back to (h, w) coords
        H_vec = np.arange(0, height, 1)
        W_vec = np.arange(0, width, 1)
        H_mat, W_mat = np.meshgrid(H_vec, W_vec, indexing="ij")
        index_map = np.concatenate([H_mat[..., None], W_mat[..., None]], axis=-1)

        # Crop out the boundary of the heatmap (clockwise, corners not repeated)
        # now just use band_width=1
        top_band = heatmap[0, :width - 1]
        top_index = index_map[0, :width - 1, :]

        right_band = heatmap[:height - 1, width - 1]
        right_index = index_map[:height - 1, width - 1, :]

        bot_band = heatmap[height - 1, -1:0:-1]
        bot_index = index_map[height - 1, -1:0:-1, :]

        left_band = heatmap[-1:0:-1, 0]
        left_index = index_map[-1:0:-1, 0, :]

        # Concatenate to 1D vector
        full_band = np.concatenate([top_band, right_band, bot_band, left_band])
        full_index = np.concatenate([top_index, right_index, bot_index, left_index], axis=0)

        # Filter out some impossible cases
        full_band[full_band < low_thresh] = 0.

        # Box width
        box_size = grid_size

        # Convert each band pixel to a 1D interval for soft-NMS overlap tests
        start_points = np.clip(np.where(full_band > -1)[0] - box_size, 0, full_band.shape[0])
        end_points = np.clip(np.where(full_band > -1)[0] + box_size, 0, full_band.shape[0])

        # Pre-compute the area (interval length)
        areas = np.maximum(0.0, end_points - start_points)

        # Work on a copy so the band scores themselves are not destroyed
        score_tmp = full_band.copy()

        # Soft-NMS: repeatedly take the best-scoring pixel and down-weight its
        # neighbours proportionally to the interval overlap (IoU).
        nms_lst = []
        while score_tmp.max() >= high_thresh:
            candidates = np.argsort(score_tmp)[::-1]

            # Compute overlap with the current best candidate
            starts = np.maximum(start_points[candidates[0]], start_points[candidates[1:]])
            ends = np.minimum(end_points[candidates[0]], end_points[candidates[1:]])

            intersects = np.maximum(0.0, ends - starts + 1)
            unions = areas[candidates[0]] + areas[candidates[1:]] - intersects

            # Suppress the scores by overlapping ratio
            weight_vec = np.ones(candidates.shape)
            weight_vec[1:] = weight_vec[1:] - intersects / unions
            score_tmp[candidates[1:]] = weight_vec[1:] * score_tmp[candidates[1:]]

            nms_lst.append(candidates[0])
            score_tmp[candidates[0]] = 0.

        # Map the kept band positions back to (h, w) junction coordinates
        filtered_junctions = full_index[nms_lst, :]

        return filtered_junctions


"""
Version2 searches junction pair candidates along the closest line proposals generated by 
probabilistic hough transform. Performance is largely affected by the distance margin set
between junctions and lines (also affects the runtime a lot).
"""


# The simple line detection version 2
class SimpleLineDetectionV2(object):
    """Line detector that restricts candidate junction pairs to those lying
    near line proposals produced by a probabilistic Hough transform."""

    def __init__(self, junc_to_line_dist=10, num_closest_line=5, line_to_junc_dist=10,
                 detect_thresh=0.6, mask_width=1, height=512, width=512,
                 hough_thresh=100, line_gap=5):
        """
        junc_to_line_dist: max dist thresh for finding closest line.
        num_closest_line: max number of closest lines to consider.
        line_to_junc_dist: max dist thresh for finding junctions along the line.
        detect_thresh: detection threshold on the segmented probability map.
        mask_width: the width for the line segmentation mask.
        height: height of the heatmap / image.
        width: width of the heatmap / image.
        hough_thresh: threshold for the probabilistic hough transform.
        line_gap: max gap for line merging in probabilistic hough transform
        """
        # Line proposal related parameters
        self.junc_to_line_dist = junc_to_line_dist
        self.num_closest_line = num_closest_line
        self.line_to_junc_dist = line_to_junc_dist
        self.detect_thresh = detect_thresh
        self.mask_width = mask_width

        # Probabilistic hough transform related param
        self.hough_thresh = hough_thresh
        self.line_gap = line_gap

        # Minimum proposal length: 10% of the image diagonal
        self.height = height
        self.width = width
        self.min_line_len = int(0.1 * (height ** 2 + width ** 2) ** 0.5)

        # Precompute the (h, w) index_map used for boundary junction recovery
        H_vec = np.arange(0, self.height, 1)
        W_vec = np.arange(0, self.width, 1)
        H_mat, W_mat = np.meshgrid(H_vec, W_vec, indexing="ij")
        self.index_map = np.concatenate([H_mat[..., None], W_mat[..., None]], axis=-1)

    # The detection step
    def detect(self, junctions, heatmap, recover_junction=True):
        """Detect lines by verifying junction pairs close to Hough proposals.

        junctions: N*2 array in HW (row, col) format.
        heatmap: H*W line probability map (NOTE: filtered in place below).
        recover_junction: if True, add junctions detected on the image boundary.
        Returns (line_map_pred, junctions).
        """
        # Filter out low-probability pixels (modifies the caller's array)
        if heatmap.min() < 0.1:
            heatmap[heatmap <= 0.1] = 0

        # If recover junctions on the boundary
        if recover_junction:
            junctions_bound = self.detect_junctions_on_boundary(heatmap)
            junctions = np.concatenate([junctions, junctions_bound], axis=0)

        # Junction-to-line distance matrix (num_junctions x num_lines)
        junc_line_dist_map, _ = self.compute_junc_line_dist_map(junctions, heatmap)

        # Initialize empty line map
        num_junctions = junctions.shape[0]
        line_map_pred = np.zeros([num_junctions, num_junctions])

        num_op = 0
        # Iterate through all the junctions
        for junc_index in range(num_junctions):
            # Closest line proposals within the distance margin
            junc_line_dist_vec = junc_line_dist_map[junc_index, :]
            junc_line_dist_idx = np.argsort(junc_line_dist_vec)[:self.num_closest_line]
            junc_line_dist_mask = junc_line_dist_vec[junc_line_dist_idx] <= self.junc_to_line_dist
            junc_line_dist_idx = junc_line_dist_idx[junc_line_dist_mask]

            # Candidate junctions: those close to any of the candidate lines
            candidate_line_mat = junc_line_dist_map[:, junc_line_dist_idx]
            candidate_junctions = np.unique(np.where(candidate_line_mat <= self.line_to_junc_dist)[0])
            mask = candidate_junctions != junc_index
            candidate_junc_idx = candidate_junctions[mask]
            # Skip pairs that have already been connected
            candidate_junc_idx = candidate_junc_idx[line_map_pred[junc_index, candidate_junc_idx] == 0]

            # Verify each candidate pair against the heatmap
            start_point = junctions[junc_index, :]  # in HW format
            for end_point_idx in candidate_junc_idx:
                # Fetch the end point
                end_point = junctions[end_point_idx, :]  # in HW format

                # Rasterize the segment (cv2.line takes (x, y), hence the flip)
                # NOTE: np.bool was removed in NumPy 1.24; use builtin bool.
                mask = np.zeros(heatmap.shape)
                mask = cv2.line(mask,
                                tuple(np.flip(start_point)), tuple(np.flip(end_point)),
                                1., self.mask_width).astype(bool)

                # Mean activation along the segment
                activation = np.mean(heatmap[mask])

                # Keep the pair if the activation passes the threshold
                if activation > self.detect_thresh:
                    line_map_pred[junc_index, end_point_idx] = 1
                    line_map_pred[end_point_idx, junc_index] = 1

                num_op += 1
        print(num_op)

        return line_map_pred, junctions

    # Add numpy detection method
    # [Abandoned] The original draft could never run: it referenced an
    # uninitialized counter and indexed the heatmap with a stale boolean mask,
    # so it crashed before reaching its own NotImplementedError. Fail fast
    # with a clear error instead.
    def detect_np(self, junctions, heatmap, recover_junction=True):
        raise NotImplementedError(
            "[Error] detect_np is an abandoned draft; use detect() instead.")

    # Compute the distance map of all pixels to all line segments
    # [Abandoned] The original draft contained debugger traps and passed a
    # misspelled 'aixs' keyword to np.repeat (TypeError). Fail fast with a
    # clear error instead.
    def compute_pixel_line_dist_map(self, line_segments):
        raise NotImplementedError(
            "[Error] compute_pixel_line_dist_map is an abandoned draft.")

    # Compute the junction to line distance matrix
    def compute_junc_line_dist_map(self, junctions, heatmap):
        """Return (junc_line_dist, line_segments).

        junc_line_dist: num_junctions x num_lines matrix of distances from
            each junction to each Hough line proposal (perpendicular distance
            when the projection falls on the segment, otherwise the distance
            to the closest endpoint).
        line_segments: num_lines x 4 proposals as (h1, w1, h2, w2).
        """
        # Perform probabilistic hough transform
        lines = probabilistic_hough_line(heatmap, threshold=self.hough_thresh, line_length=self.min_line_len,
                                         line_gap=self.line_gap, seed=200)

        # Convert the segments (returned as (x, y) point pairs) to HW format
        line_segments = np.array(lines)
        line_segments = np.concatenate([np.flip(line_segments[:, 1, :], axis=-1),
                                        np.flip(line_segments[:, 0, :], axis=-1)],
                                       axis=-1)

        # Compute junctions to line distance matrix (num_junctions x num_lines)
        num_junctions = junctions.shape[0]
        start_points = line_segments[:, :2]
        end_points = line_segments[:, 2:]
        # Direction vectors of the line segments
        dir_vecs = end_points - start_points

        # Vector from each segment start point to each junction
        start_to_junc_mat = np.expand_dims(junctions, axis=1) - start_points[None, ...]
        start_to_junc_mat_norm = np.linalg.norm(start_to_junc_mat, axis=-1, ord=None)
        # Deal with the divide by zero corner case.
        start_to_junc_mask = start_to_junc_mat_norm == 0.
        start_to_junc_mat_norm[start_to_junc_mask] = 1.

        # Compute the point to line projection
        dir_mat = np.repeat(dir_vecs[None, ...], num_junctions, axis=0)
        dir_mat_norm = np.linalg.norm(dir_mat, axis=-1, ord=None)

        # Angle between the segment direction and the start->junction vector
        tmp = np.sum(start_to_junc_mat * dir_mat, axis=-1) / (dir_mat_norm * start_to_junc_mat_norm)
        tmp = np.clip(tmp, a_min=-1, a_max=1)
        theta = np.arccos(tmp)

        # Perpendicular distance from each junction to the (infinite) line
        junc_line_dist = np.abs(start_to_junc_mat_norm * np.sin(theta))

        # Compute whether the projection points are on the line segments
        junc_line_proj_len = start_to_junc_mat_norm * np.cos(theta)
        junc_line_proj_frac = junc_line_proj_len / dir_mat_norm

        # Compute the junction to endpoints distance
        junc_start_dist = np.linalg.norm(start_to_junc_mat, axis=-1, ord=None)
        junc_end_dist = np.expand_dims(junctions, axis=1) - end_points[None, ...]
        junc_end_dist = np.linalg.norm(junc_end_dist, axis=-1, ord=None)

        junc_endpoints_dist = np.concatenate([junc_start_dist[..., None], junc_end_dist[..., None]], axis=-1)
        junc_min_endpoints_dist = np.min(junc_endpoints_dist, axis=-1)

        # Outside the segment span: fall back to the closest endpoint distance
        junc_line_dist[junc_line_proj_frac > 1.] = junc_min_endpoints_dist[junc_line_proj_frac > 1.]
        junc_line_dist[junc_line_proj_frac < 0.] = junc_min_endpoints_dist[junc_line_proj_frac < 0.]

        return junc_line_dist, line_segments

    # Recover junctions from the boundary
    def detect_junctions_on_boundary(self, heatmap, band_width=1,
                                     low_thresh=0.15, high_thresh=0.2,
                                     grid_size=8):
        """
        heatmap: H*W*1 or H*W prob map
        band_width: width of the band on the boundary to consider (now only 1 is supported)
        low_thresh: directly discard all the pixels below this threshold
        high_thresh: a pixel need to have prob higher than this to be considered as a junction to keep.
        grid_size: the window width of the NMS
        Returns an M*2 array of recovered boundary junctions in HW format.
        """
        if not band_width == 1:
            raise ValueError("[Error] Currently we only support band_width=1 version.")

        if len(heatmap.shape) == 3:
            heatmap = heatmap.squeeze()

        # Crop out the boundary of the heatmap (clockwise, corners not repeated)
        # now just use band_width=1
        top_band = heatmap[0, :self.width - 1]
        top_index = self.index_map[0, :self.width - 1, :]

        right_band = heatmap[:self.height - 1, self.width - 1]
        right_index = self.index_map[:self.height - 1, self.width - 1, :]

        bot_band = heatmap[self.height - 1, -1:0:-1]
        bot_index = self.index_map[self.height - 1, -1:0:-1, :]

        left_band = heatmap[-1:0:-1, 0]
        left_index = self.index_map[-1:0:-1, 0, :]

        # Concatenate to 1D vector
        full_band = np.concatenate([top_band, right_band, bot_band, left_band])
        full_index = np.concatenate([top_index, right_index, bot_index, left_index], axis=0)

        # Filter out some impossible cases
        full_band[full_band < low_thresh] = 0.

        # Box width
        box_size = grid_size

        # Convert each band pixel to a 1D interval for soft-NMS overlap tests
        start_points = np.clip(np.where(full_band > -1)[0] - box_size, 0, full_band.shape[0])
        end_points = np.clip(np.where(full_band > -1)[0] + box_size, 0, full_band.shape[0])

        # Pre-compute the area (interval length)
        areas = np.maximum(0.0, end_points - start_points)

        # Work on a copy so the band scores themselves are not destroyed
        score_tmp = full_band.copy()

        # Soft-NMS: repeatedly take the best-scoring pixel and down-weight its
        # neighbours proportionally to the interval overlap (IoU).
        nms_lst = []
        while score_tmp.max() >= high_thresh:
            candidates = np.argsort(score_tmp)[::-1]

            # Compute overlap with the current best candidate
            starts = np.maximum(start_points[candidates[0]], start_points[candidates[1:]])
            ends = np.minimum(end_points[candidates[0]], end_points[candidates[1:]])

            intersects = np.maximum(0.0, ends - starts + 1)
            unions = areas[candidates[0]] + areas[candidates[1:]] - intersects

            # Suppress the scores by overlapping ratio
            weight_vec = np.ones(candidates.shape)
            weight_vec[1:] = weight_vec[1:] - intersects / unions
            score_tmp[candidates[1:]] = weight_vec[1:] * score_tmp[candidates[1:]]

            nms_lst.append(candidates[0])
            score_tmp[candidates[0]] = 0.

        # Map the kept band positions back to (h, w) junction coordinates
        filtered_junctions = full_index[nms_lst, :]

        return filtered_junctions


"""
Version3 iterates through all the line proposals and compares pairs of junctions within a
distance margin to the line. We only search through all the sorted consecutive pairs.
"""


# The simple line detection version3
class SimpleLineDetectionV3(object):
    """Line detector driven by probabilistic-Hough line proposals.

    For each proposed line, the junctions lying within `junc_to_line_dist`
    of the line are sorted by their projection fraction along it, and each
    consecutive pair is tested for connectivity by averaging the heatmap
    activation under a thin line mask drawn between the two junctions.
    """

    def __init__(self, junc_to_line_dist=10, heatmap_thresh=0.2, detect_thresh=0.6,
                 mask_width=1, height=512, width=512,
                 hough_thresh=100, line_gap=5, verbose=False):
        """
        junc_to_line_dist: max dist thresh for finding closest line.
        heatmap_thresh: threshold for the heatmap filtering.
        detect_thresh: detection threshold on the segmented probability map.
        mask_width: the width for the line segmentation mask.
        height: height of the heatmap / image.
        width: width of the heatmap / image.
        hough_thresh: threshold for the probabilistic hough transform.
        line_gap: max gap for line merging in probabilistic hough transform.
        verbose: if printing some debug information.
        """
        # Line proposal related parameters
        self.junc_to_line_dist = junc_to_line_dist
        self.heatmap_thresh = heatmap_thresh
        self.detect_thresh = detect_thresh
        self.mask_width = mask_width

        # Probabilistic hough transform related parameters
        self.hough_thresh = hough_thresh
        self.line_gap = line_gap

        # Minimum accepted proposal length: 10% of the image diagonal.
        self.height = height
        self.width = width
        self.min_line_len = int(0.1 * (height ** 2 + width ** 2) ** 0.5)

        self.verbose = verbose

        # Precompute the (H x W x 2) map of pixel coordinates in HW order.
        H_vec = np.arange(0, self.height, 1)
        W_vec = np.arange(0, self.width, 1)
        H_mat, W_mat = np.meshgrid(H_vec, W_vec, indexing="ij")
        self.index_map = np.concatenate([H_mat[..., None], W_mat[..., None]], axis=-1)

    # The detection step
    def detect(self, junctions, heatmap, recover_junction=True):
        """Predict the junction connectivity map.

        junctions: (N x 2) junction coordinates in HW order.
        heatmap: (H x W) line probability map.
        recover_junction: also detect junctions lying on the image boundary.

        Returns (line_map_pred, junctions) where line_map_pred is an
        (N' x N') symmetric 0/1 adjacency matrix over the (possibly
        boundary-augmented) junction set.
        """
        # Zero-out low activations.
        # NOTE(review): this mutates the caller's heatmap in place -- confirm
        # callers do not rely on the unfiltered map.
        if heatmap.min() < self.heatmap_thresh:
            heatmap[heatmap <= self.heatmap_thresh] = 0

        # If recover junctions on the boundary
        if recover_junction:
            junctions_bound = self.detect_junctions_on_boundary(heatmap)
            junctions = np.concatenate([junctions, junctions_bound], axis=0)

        # Get the junction line distance map first
        junc_line_dist_map, junc_line_proj_frac, _ = self.compute_junc_line_dist_map(junctions, heatmap)

        ####################################
        ## Start the line prediction part ##
        ####################################
        # Initialize empty line_pred map
        num_junctions = junctions.shape[0]
        num_lines = junc_line_dist_map.shape[1]
        line_map_pred = np.zeros([num_junctions, num_junctions], dtype=np.int32)

        if self.verbose:
            num_op = 0

        # Iterate through all the line segments
        for line_idx in range(num_lines):
            junc_line_dist_vec = junc_line_dist_map[:, line_idx]
            junc_line_frac_vec = junc_line_proj_frac[:, line_idx]

            # Indexes in original junction order.
            junc_cand_idxs = np.where(junc_line_dist_vec < self.junc_to_line_dist)[0]
            # Sort the candidates by their projection fraction along the line.
            junc_cand_frac_vec = junc_line_frac_vec[junc_cand_idxs]
            sort_key = np.argsort(junc_cand_frac_vec)
            junc_cand_idxs_sorted = junc_cand_idxs[sort_key]

            # Only process the line when there are at least 2 candidate junctions.
            num_cand_junc = junc_cand_idxs_sorted.shape[0]
            if junc_cand_idxs_sorted.shape[0] >= 2:
                # Connectivity flag for each consecutive junction interval.
                junc_connect_map = np.zeros([num_cand_junc - 1])

                # Go through all the junction intervals
                for junc_interval_idx in range(num_cand_junc - 1):
                    # Fetch the current and next junction
                    current_junc = junctions[junc_cand_idxs_sorted[junc_interval_idx], :]
                    next_junc = junctions[junc_cand_idxs_sorted[junc_interval_idx + 1], :]

                    # Check the connectivity: rasterize the segment between the
                    # two junctions (cv2 expects WH order, hence the flip).
                    mask = np.zeros(heatmap.shape)
                    mask = cv2.line(mask,
                                    tuple(np.flip(current_junc)),
                                    tuple(np.flip(next_junc)),
                                    1., self.mask_width).astype(bool)
                    # Get the average activation
                    activation = np.mean(heatmap[mask])

                    # Define the heuristic threshold
                    if activation > self.detect_thresh:
                        junc_connect_map[junc_interval_idx] = 1

                    # Record the debug info if verbose
                    if self.verbose:
                        num_op += 1

                label_state = 0
                start_idx = 0
                # Label all the combinations: every junction pair inside a
                # maximal run of connected intervals gets connected.
                for junc_interval_idx in range(num_cand_junc - 1):
                    label = junc_connect_map[junc_interval_idx]
                    # Check transition
                    if label == 1:
                        # Transition from 0 to 1
                        if label_state == 0:
                            # Label the corresponding junctions
                            current_junc_idx = junc_cand_idxs_sorted[junc_interval_idx + 1]
                            prev_junc_idx = junc_cand_idxs_sorted[junc_interval_idx]
                            line_map_pred[current_junc_idx, prev_junc_idx] = 1
                            line_map_pred[prev_junc_idx, current_junc_idx] = 1

                            label_state = 1
                            start_idx = junc_interval_idx
                        # Transition from 1 to 1
                        else:
                            # Connect the new junction to every junction since start_idx.
                            current_junc_idx = junc_cand_idxs_sorted[junc_interval_idx + 1]
                            for i in range(start_idx, junc_interval_idx + 1):
                                prev_junc_idx = junc_cand_idxs_sorted[i]
                                line_map_pred[current_junc_idx, prev_junc_idx] = 1
                                line_map_pred[prev_junc_idx, current_junc_idx] = 1

                    else:
                        # Transition from 1 to 0
                        if label_state == 1:
                            label_state = 0

        if self.verbose:
            print("Num of operations: ", num_op)

        return line_map_pred, junctions

    # Compute the junction to line distance matrix
    def compute_junc_line_dist_map(self, junctions, heatmap):
        """Compute point-to-segment distances between junctions and Hough proposals.

        Returns:
            junc_line_dist: (num_junctions x num_lines) distance matrix.
            junc_line_proj_frac: projection fraction of each junction along each
                line; values outside [0, 1] mean the projection falls off the segment.
            line_segments: (num_lines x 4) segments as [h1, w1, h2, w2].

        NOTE(review): assumes the Hough transform returns at least one line; an
        empty result would make the indexing below fail -- confirm upstream.
        """
        # Perform probabilistic hough transform (fixed seed => deterministic).
        lines = probabilistic_hough_line(heatmap, threshold=self.hough_thresh, line_length=self.min_line_len,
                                         line_gap=self.line_gap, seed=200)

        # Convert the (WH endpoint) line segments to HW format [h1, w1, h2, w2].
        line_segments = np.array(lines)
        line_segments = np.concatenate([np.flip(line_segments[:, 1, :], axis=-1),
                                        np.flip(line_segments[:, 0, :], axis=-1)],
                                       axis=-1)

        # Compute junctions to line distance matrix (num_junctions x num_lines)
        num_junctions = junctions.shape[0]
        start_points = line_segments[:, :2]
        end_points = line_segments[:, 2:]
        # Compute the direction vectors
        dir_vecs = end_points - start_points

        # Compute the junction to start point vector
        start_to_junc_mat = np.expand_dims(junctions, axis=1) - start_points[None, ...]
        start_to_junc_mat_norm = np.linalg.norm(start_to_junc_mat, axis=-1, ord=None)
        # Deal with the divide by zero corner case.
        start_to_junc_mask = start_to_junc_mat_norm == 0.
        start_to_junc_mat_norm[start_to_junc_mask] = 1.

        # Compute the point to line projection
        dir_mat = np.repeat(dir_vecs[None, ...], num_junctions, axis=0)
        dir_mat_norm = np.linalg.norm(dir_mat, axis=-1, ord=None)

        # Angle between the line direction and the start-to-junction vector.
        tmp = np.sum(start_to_junc_mat * dir_mat, axis=-1) / (dir_mat_norm * start_to_junc_mat_norm)
        tmp = np.clip(tmp, a_min=-1, a_max=1)
        theta = np.arccos(tmp)

        # Orthogonal distance from each junction to each (infinite) line.
        junc_line_dist = np.abs(start_to_junc_mat_norm * np.sin(theta))

        # Compute whether the projection points are on the line segments
        junc_line_proj_len = start_to_junc_mat_norm * np.cos(theta)
        junc_line_proj_frac = junc_line_proj_len / dir_mat_norm

        # Compute the junction to endpoints distance
        junc_start_dist = np.linalg.norm(start_to_junc_mat, axis=-1, ord=None)
        junc_end_dist = np.expand_dims(junctions, axis=1) - end_points[None, ...]
        junc_end_dist = np.linalg.norm(junc_end_dist, axis=-1, ord=None)

        junc_endpoints_dist = np.concatenate([junc_start_dist[..., None], junc_end_dist[..., None]], axis=-1)
        junc_min_endpoints_dist = np.min(junc_endpoints_dist, axis=-1)

        # When the projection falls outside the segment, replace the orthogonal
        # distance with the distance to the closest endpoint.
        junc_line_dist[junc_line_proj_frac > 1.] = junc_min_endpoints_dist[junc_line_proj_frac > 1.]
        junc_line_dist[junc_line_proj_frac < 0.] = junc_min_endpoints_dist[junc_line_proj_frac < 0.]

        return junc_line_dist, junc_line_proj_frac, line_segments

    # Recover junctions from the boundary
    def detect_junctions_on_boundary(self, heatmap, band_width=1,
                                     low_thresh=0.15, high_thresh=0.2,
                                     grid_size=8):
        """
        heatmap: H*W*1 or H*W prob map
        band_width: width of the band on the boundary to consider (now only 1 is supported)
        low_thresh: directly discard all the pixels below this threshold
        high_thresh: a pixel needs to have prob higher than this to be considered as a junction to keep.
        grid_size: the window width of the NMS

        Returns an (M x 2) array of boundary junction coordinates in HW order.
        """
        if not band_width == 1:
            raise ValueError("[Error] Currently we only support band_width=1 version.")

        if len(heatmap.shape) == 3:
            heatmap = heatmap.squeeze()

        # Crop out the boundary of the heatmap, walking clockwise from the
        # top-left corner (now just use band_width=1).
        top_band = heatmap[0, :self.width - 1]
        top_index = self.index_map[0, :self.width - 1, :]

        right_band = heatmap[:self.height - 1, self.width - 1]
        right_index = self.index_map[:self.height - 1, self.width - 1, :]

        bot_band = heatmap[self.height - 1, -1:0:-1]
        bot_index = self.index_map[self.height - 1, -1:0:-1, :]

        left_band = heatmap[-1:0:-1, 0]
        left_index = self.index_map[-1:0:-1, 0, :]

        # Concatenate to 1D vector
        full_band = np.concatenate([top_band, right_band, bot_band, left_band])
        full_index = np.concatenate([top_index, right_index, bot_index, left_index], axis=0)

        # Filter out some impossible cases
        full_band[full_band < low_thresh] = 0.

        # Box width
        box_size = grid_size

        # Convert each band pixel to a 1D interval centered on it.
        start_points = np.clip(np.where(full_band > -1)[0] - box_size, 0, full_band.shape[0])
        end_points = np.clip(np.where(full_band > -1)[0] + box_size, 0, full_band.shape[0])

        # Pre-compute the area
        areas = np.maximum(0.0, end_points - start_points)

        # Working copy of the confidences for the soft suppression below.
        score_tmp = full_band.copy()

        # Start NMS (soft suppression by 1D interval overlap ratio).
        nms_lst = []
        while score_tmp.max() >= high_thresh:
            candidates = np.argsort(score_tmp)[::-1]

            # Compute overlap
            starts = np.maximum(start_points[candidates[0]], start_points[candidates[1:]])
            ends = np.minimum(end_points[candidates[0]], end_points[candidates[1:]])

            intersects = np.maximum(0.0, ends - starts + 1)
            unions = areas[candidates[0]] + areas[candidates[1:]] - intersects

            # Suppress the scores by overlapping ratio (IoU)
            weight_vec = np.ones(candidates.shape)
            weight_vec[1:] = weight_vec[1:] - intersects / unions
            score_tmp[candidates[1:]] = weight_vec[1:] * score_tmp[candidates[1:]]

            nms_lst.append(candidates[0])
            score_tmp[candidates[0]] = 0.

        # Get the junctions from the index_map
        filtered_junctions = full_index[nms_lst, :]

        return filtered_junctions


"""
Newest Line detector.
Supports:
    (1) pytorch API inference
    (2) local_max search
    (3) inlier thresholding
"""


class SimpleLineDetector_LCNN2(object):
    def __init__(self,
                 detect_thresh, num_samples=64, verbose=False,
                 sampling_method="local_max", inlier_thresh=0.,
                 heatmap_low_thresh=0.15, heatmap_high_thresh=0.2,
                 max_local_patch_radius=3, lambda_radius=2.,
                 use_long_line_nms=False, use_candidate_suppression=False,
                 nms_dist_tolerance=3.,
                 use_heatmap_refinement=False, heatmap_refine_cfg=None,
                 use_junction_refinement=False, junction_refine_cfg=None
                 ):
        """
        detect_thresh: The probability threshold for mean activation (0. ~ 1.)
        num_samples: Number of sampling locations along the line segments.
        verbose: Verbose or not haha.
        sampling_method: Sampling method on locations ("bilinear" or "local_max").
        inlier_thresh: The min inlier ratio to satisfy (0. ~ 1.) => 0. means no threshold.
        heatmap_low_thresh: The lowest threshold for the pixel to be considered as candidate in junction recovery.
        heatmap_high_thresh: The higher threshold for NMS in junction recovery.
        max_local_patch_radius: The max patch to be considered in local maximum search.
        lambda_radius: The lambda factor in linear local maximum search formulation
        use_long_line_nms: Apply line NMS to break long line segments containing several sub-segments into pieces...
        use_candidate_suppression: Apply candidate suppression to break long segments into short sub-segments.
        nms_dist_tolerance: The distance tolerance for nms. Decide whether the junctions are on the line.
        use_heatmap_refinement: Use heatmap refinement method or not.
        heatmap_refine_cfg: The configs for heatmap refinement methods.
        use_junction_refinement: Use junction refinement method or not.
        junction_refine_cfg: The configs for junctioni refinement methods.
        """
        # Line detection parameters
        # [Detect threshold] Handling multiple detection threshold
        if isinstance(detect_thresh, float) or isinstance(detect_thresh, np.ndarray):
            self.detect_thresh = detect_thresh
        elif isinstance(detect_thresh, list):
            self.detect_thresh = np.array(detect_thresh)
        else:
            raise ValueError("[Error] detect_thresh can only be a single float, list, or np array.")
        # Indicator that we have multiple detection thresh
        self.num_detect_thresh = 1
        if isinstance(detect_thresh, np.ndarray) or isinstance(detect_thresh, list):
            self.num_detect_thresh = len(detect_thresh)
        self.num_samples = num_samples
        self.verbose = verbose
        self.sampling_method = sampling_method
        # [Inlier threshold] Handling multiple inlier threshold
        if isinstance(inlier_thresh, float) or isinstance(inlier_thresh, np.ndarray):
            self.inlier_thresh = inlier_thresh
        elif isinstance(inlier_thresh, list):
            self.inlier_thresh = np.array(inlier_thresh)
        else:
            raise ValueError("[Error] inlier_thresh can only be a single float, list, or np array.")
        # Indicator that we have multiple inlier thresh
        self.num_inlier_thresh = 1
        if isinstance(inlier_thresh, np.ndarray) or isinstance(inlier_thresh, list):
            self.num_inlier_thresh = len(inlier_thresh)
        # Max local patch size
        self.local_patch_radius = max_local_patch_radius
        self.lambda_radius = lambda_radius

        # Detecting junctions on the boundary parameters
        self.low_thresh = heatmap_low_thresh
        self.high_thresh = heatmap_high_thresh

        # Pre-compute the linspace sampler
        self.sampler = np.linspace(0, 1, self.num_samples)
        self.torch_sampler = torch.linspace(0, 1, self.num_samples)

        # Long line segment suppression configuration
        self.use_long_line_nms = use_long_line_nms
        self.use_candidate_suppression = use_candidate_suppression
        self.nms_dist_tolerance = nms_dist_tolerance

        # Heatmap refinement configuration
        self.use_heatmap_refinement = use_heatmap_refinement
        self.heatmap_refine_cfg = heatmap_refine_cfg
        if self.use_heatmap_refinement and self.heatmap_refine_cfg is None:
            raise ValueError("[Error] Heatmap refine config must be specified if one wants to use heatmap refinement.")

        # Junction refinement configuration
        self.use_junction_refinement = use_junction_refinement
        self.junction_refine_cfg = junction_refine_cfg
        if self.use_junction_refinement and self.junction_refine_cfg is None:
            raise ValueError(
                "[Error] Junction refine config must be specified if one wants to use junction refinement.")

    def detect(self, junctions, heatmap, recover_junction=True, filter_heatmap=False):
        # Check if heatmap is filtered
        # TODO: Test this part??
        if heatmap.min() < 0.1 and filter_heatmap:
            heatmap[heatmap <= 0.1] = 0

        # If recover junctions on the boundary
        if recover_junction:
            junctions_bound = self.detect_junctions_on_boundary(heatmap,
                                                                low_thresh=self.low_thresh,
                                                                high_thresh=self.high_thresh
                                                                )
            junctions = np.concatenate([junctions, junctions_bound], axis=0)

        # Initialize empty line map
        num_junctions = junctions.shape[0]
        line_map_pred = np.zeros([num_junctions, num_junctions])

        # Generate the candidate map
        candidate_map = np.triu(np.ones([num_junctions, num_junctions], dtype=np.int32)) - np.eye(num_junctions)
        candidate_index_map = np.where(candidate_map)
        candidate_index_map = np.concatenate([candidate_index_map[0][..., None],
                                              candidate_index_map[1][..., None]], axis=-1)

        # Get the corresponding start and end junctions
        candidate_junc_start = junctions[candidate_index_map[:, 0], :]
        candidate_junc_end = junctions[candidate_index_map[:, 1], :]

        # Get the sampling locations (N x 64)
        cand_samples_h = candidate_junc_start[:, 0:1] * self.sampler[None, ...] + candidate_junc_end[:, 0:1] * \
                         (1 - self.sampler)[None, ...]
        cand_samples_w = candidate_junc_start[:, 1:2] * self.sampler[None, ...] + candidate_junc_end[:, 1:2] * \
                         (1 - self.sampler)[None, ...]
        # Clip to image boundary
        if len(heatmap.shape) > 2:
            H, W, _ = heatmap.shape
        else:
            H, W = heatmap.shape
        cand_h = np.clip(cand_samples_h, 0, H - 1)
        cand_w = np.clip(cand_samples_w, 0, W - 1)

        # Compute the distance threshold
        segments_length = np.sqrt(np.sum((candidate_junc_start - candidate_junc_end) ** 2, axis=-1))  # N
        normalized_seg_length = segments_length / (((H ** 2) + (W ** 2)) ** 0.5)  # N
        dist_thresh = 0.5 * (2 ** 0.5) + 3. * normalized_seg_length  # min_dist + 3. * noramlize
        # Make it N x 64
        dist_thresh = np.repeat(dist_thresh[..., None], self.num_samples, axis=-1)

        # Compute the candidate points
        cand_points = np.concatenate([cand_h[..., None], cand_w[..., None]], axis=-1)
        cand_points_round = np.round(cand_points)
        # N x 64 x 2

        # Construct local patches 9x9 = 81
        patch_mask = np.zeros([int(2 * self.local_patch_radius + 1),
                               int(2 * self.local_patch_radius + 1)])
        H_patch_points, W_patch_points = np.where(patch_mask >= 0)
        patch_points = np.concatenate([H_patch_points[..., None], W_patch_points[..., None]], axis=-1)
        # Shift [0, 0] to the center
        patch_points = patch_points - self.local_patch_radius

        # Construct local patch mask
        # N x 64 x 81 x 2
        patch_points_shifted = np.expand_dims(cand_points_round, axis=2) + patch_points[None, None, ...]
        patch_dist = np.sqrt(np.sum((np.expand_dims(cand_points, axis=2) - patch_points_shifted) ** 2, axis=-1))
        patch_dist_mask = patch_dist < dist_thresh[..., None]
        print(patch_dist_mask.shape)
        # N x 64 x 81

        # Get all points => num_points_center x num_patch_points x 2
        points_H = np.clip(patch_points_shifted[:, :, :, 0], a_min=0, a_max=H - 1).astype(np.int)
        points_W = np.clip(patch_points_shifted[:, :, :, 1], a_min=0, a_max=W - 1).astype(np.int)
        points = np.concatenate([points_H[..., None], points_W[..., None]], axis=-1).to(torch.int32)
        print(points.shape)

        # Sample the feature (N x 64 x 81)
        sampled_feat = heatmap[points[:, :, :, 0], points[:, :, :, 1]]
        # Filtering using the valid mask
        sampled_feat = sampled_feat * patch_dist_mask.astype(np.int)
        sampled_feat_lmax = np.max(sampled_feat, axis=-1)

        raise NotImplementedError

    # Convert inputs to desired torch tensor
    def convert_inputs(self, inputs, device):
        """Coerce `inputs` (numpy ndarray or torch tensor) to a float32 tensor on `device`."""
        if isinstance(inputs, torch.Tensor):
            return inputs.to(torch.float32).to(device)
        if isinstance(inputs, np.ndarray):
            return torch.tensor(inputs, dtype=torch.float32, device=device)
        raise ValueError("[Error] inputs must either be torch tensor or numpy ndarray.")

    # Torch version of junction detection
    def detect_torch(self, junctions, heatmap,
                     recover_junction=True, filter_heatmap=False,
                     device=torch.device("cpu")):
        """Detect line segments between junction pairs by sampling the heatmap (torch).

        junctions: (N x 2) junction coordinates; HW order assumed -- TODO confirm.
        heatmap: (H x W) or (H x W x 1) line probability map.
        recover_junction: accepted for API symmetry with `detect`; not used in
            this body (no boundary-junction recovery happens here).
        filter_heatmap: accepted for API symmetry; currently has no effect
            (see NOTE below).
        device: torch device on which all computation runs.

        Returns (line_map_pred, junctions, heatmap). line_map_pred is an
        (N x N) symmetric 0/1 adjacency matrix when a single detect/inlier
        threshold is configured, otherwise a
        (num_detect_thresh x num_inlier_thresh x N x N) tensor.
        """
        # Convert inputs to torch tensor
        junctions = self.convert_inputs(junctions, device=device)
        heatmap = self.convert_inputs(heatmap, device=device)

        # Check if heatmap is filtered
        # if heatmap.min() < 0.1 and filter_heatmap:
        #     heatmap[heatmap <= 0.1] = 0.
        # NOTE(review): this hard-coded 0.5 cut-off overrides the commented-out
        # `filter_heatmap` logic above, so the `filter_heatmap` flag has no
        # effect -- confirm this is intentional. Also mutates `heatmap` in place.
        heatmap[heatmap <= 0.5] = 0.

        # Perform the heatmap refinement
        if self.use_heatmap_refinement:
            if self.heatmap_refine_cfg["mode"] == "global":
                heatmap = self.refine_heatmap(
                    heatmap,
                    self.heatmap_refine_cfg["ratio"],
                    self.heatmap_refine_cfg["valid_thresh"]
                )
            elif self.heatmap_refine_cfg["mode"] == "local":
                heatmap = self.refine_heatmap_local(
                    heatmap,
                    self.heatmap_refine_cfg["num_blocks"],
                    self.heatmap_refine_cfg["overlap_ratio"],
                    self.heatmap_refine_cfg["ratio"],
                    self.heatmap_refine_cfg["valid_thresh"]
                )
                # heatmap = self.refine_heatmap_local(heatmap, 20, 0.3)

        # Initialize empty line map (one slice per detect/inlier threshold pair).
        num_junctions = junctions.shape[0]
        line_map_pred = torch.zeros([self.num_detect_thresh, self.num_inlier_thresh,
                                     num_junctions, num_junctions], device=device, dtype=torch.int32)

        # Generate the candidate map: strictly upper-triangular (i < j) pairs.
        candidate_map = torch.triu(torch.ones([num_junctions, num_junctions], device=device,
                                              dtype=torch.int32), diagonal=1)

        # Fetch the image boundary
        if len(heatmap.shape) > 2:
            H, W, _ = heatmap.shape
        else:
            H, W = heatmap.shape

        # Optionally perform candidate filtering
        if self.use_long_line_nms and self.use_candidate_suppression:
            raise ValueError(
                "[Error] User should only choose either use 'candidate suppression' or 'long line nms', but not both.")
        if self.use_candidate_suppression:
            candidate_map = self.candidate_suppression(junctions, candidate_map, H, W)

        # Fetch the candidates
        candidate_index_map = torch.where(candidate_map)
        candidate_index_map = torch.cat([candidate_index_map[0][..., None],
                                         candidate_index_map[1][..., None]], dim=-1)

        # Get the corresponding start and end junctions
        candidate_junc_start = junctions[candidate_index_map[:, 0], :]
        candidate_junc_end = junctions[candidate_index_map[:, 1], :]

        # Get the sampling locations (N x num_samples) along each segment.
        sampler = self.torch_sampler.to(device)[None, ...]
        cand_samples_h = candidate_junc_start[:, 0:1] * sampler + \
                         candidate_junc_end[:, 0:1] * (1 - sampler)
        cand_samples_w = candidate_junc_start[:, 1:2] * sampler + \
                         candidate_junc_end[:, 1:2] * (1 - sampler)

        # Clip to image boundary
        cand_h = torch.clamp(cand_samples_h, min=0, max=H - 1)
        cand_w = torch.clamp(cand_samples_w, min=0, max=W - 1)

        # Local maximum search
        if self.sampling_method == "local_max":
            # Compute normalized segment lengths
            segments_length = torch.sqrt(torch.sum((candidate_junc_start.to(torch.float32) - \
                                                    candidate_junc_end.to(torch.float32)) ** 2, dim=-1))  # N
            normalized_seg_length = segments_length / (((H ** 2) + (W ** 2)) ** 0.5)  # N

            # Perform local max search in chunks to bound peak memory usage.
            num_cand = cand_h.shape[0]
            group_size = 10000
            if num_cand > group_size:
                num_iter = math.ceil(num_cand / group_size)
                sampled_feat_lst = []
                for iter_idx in range(num_iter):
                    if not iter_idx == num_iter - 1:
                        cand_h_ = cand_h[iter_idx * group_size: (iter_idx + 1) * group_size, :]
                        cand_w_ = cand_w[iter_idx * group_size: (iter_idx + 1) * group_size, :]
                        normalized_seg_length_ = normalized_seg_length[
                                                 iter_idx * group_size: (iter_idx + 1) * group_size]
                    else:
                        # Last chunk takes the remainder.
                        cand_h_ = cand_h[iter_idx * group_size:, :]
                        cand_w_ = cand_w[iter_idx * group_size:, :]
                        normalized_seg_length_ = normalized_seg_length[iter_idx * group_size:]
                    sampled_feat_ = self.detect_local_max(
                        heatmap,
                        cand_h_, cand_w_, H, W,
                        normalized_seg_length_, device
                    )
                    sampled_feat_lst.append(sampled_feat_)
                sampled_feat = torch.cat(sampled_feat_lst, dim=0)
            else:
                sampled_feat = self.detect_local_max(
                    heatmap,
                    cand_h, cand_w, H, W,
                    normalized_seg_length, device
                )
        # Bilinear sampling
        elif self.sampling_method == "bilinear":
            # Perform bilinear sampling
            sampled_feat = self.detect_bilinear(
                heatmap,
                cand_h, cand_w, H, W, device
            )
        else:
            raise ValueError("[Error] Unknown sampling method. Now only 'local_max' and 'bilinear' are supported.")

        # Apply every configured (detect_thresh, inlier_thresh) combination.
        for detect_thresh_idx in range(self.num_detect_thresh):
            for inlier_thresh_idx in range(self.num_inlier_thresh):
                # Fetch the corresponding thresholds
                detect_thresh = self.detect_thresh[
                    detect_thresh_idx] if self.num_detect_thresh > 1 else self.detect_thresh
                inlier_thresh = self.inlier_thresh[
                    inlier_thresh_idx] if self.num_inlier_thresh > 1 else self.inlier_thresh

                # [Simple threshold detection] Perform the simple threshold detection
                # detection_results is a mask over all candidates
                detection_results = torch.mean(sampled_feat, dim=-1) > detect_thresh

                # [Inlier threshold detection] Perform the inlier threshold detection
                if inlier_thresh > 0.:
                    inlier_ratio = torch.sum(sampled_feat > detect_thresh, dim=-1).to(torch.float32) / self.num_samples
                    detection_results_inlier = inlier_ratio >= inlier_thresh
                    detection_results = detection_results * detection_results_inlier

                # Convert detection results back to line_map_pred (symmetric).
                detected_junc_indexes = candidate_index_map[detection_results, :]
                line_map_pred[
                    detect_thresh_idx, inlier_thresh_idx, detected_junc_indexes[:, 0], detected_junc_indexes[:, 1]] = 1
                line_map_pred[
                    detect_thresh_idx, inlier_thresh_idx, detected_junc_indexes[:, 1], detected_junc_indexes[:, 0]] = 1

        # Perform NMS
        if self.use_long_line_nms:
            line_map_pred = self.line_NMS(junctions, line_map_pred, H, W)

        # Perform junction refinement
        if self.use_junction_refinement:
            junctions, line_map_pred = self.refine_junction_perturb(
                junctions, line_map_pred,
                heatmap, H, W,
                device
            )

        # Keep the original behavior when there's one detect_thresh and one inlier_thresh
        if self.num_detect_thresh == 1 and self.num_inlier_thresh == 1:
            line_map_pred = line_map_pred[0, 0, ...]

        return line_map_pred, junctions, heatmap

    # Heatmap refinement method
    def refine_heatmap(self, heatmap, ratio=0.2, valid_thresh=1e-2):
        """Globally renormalize `heatmap` by the mean of its strongest activations.

        Values <= valid_thresh are excluded from the statistics; the normalizer
        is the mean of the top `ratio` fraction of the remaining values, and
        the output is clamped to [0, 1].
        """
        valid_values = heatmap[heatmap > valid_thresh]
        ranked = torch.sort(valid_values, descending=True)[0]
        top_count = math.ceil(ranked.shape[0] * ratio)
        normalizer = torch.mean(ranked[:top_count])
        return torch.clamp(heatmap / normalizer, min=0., max=1.)

    # Local heatmap refinement method
    def refine_heatmap_local(self,
                             heatmap,
                             num_blocks=5,
                             overlap_ratio=0.5,
                             ratio=0.2,
                             valid_thresh=2e-3
                             ):
        """ Local heatmap refinement method: refine overlapping windows
        independently with `refine_heatmap` and average the results. """
        H, W = heatmap.shape
        # Fraction of a block by which consecutive windows advance
        step_ratio = 1 - overlap_ratio
        block_h = round(H / (1 + (num_blocks - 1) * step_ratio))
        block_w = round(W / (1 + (num_blocks - 1) * step_ratio))

        # Accumulators: refined-value sum and per-pixel window count
        counts = torch.zeros(heatmap.shape, dtype=torch.int,
                             device=heatmap.device)
        refined_sum = torch.zeros(heatmap.shape, dtype=torch.float,
                                  device=heatmap.device)

        for row in range(num_blocks):
            for col in range(num_blocks):
                # Window bounds; the last row/col window extends to the border
                top = round(row * block_h * step_ratio)
                left = round(col * block_w * step_ratio)
                bottom = top + block_h if row < num_blocks - 1 else H
                right = left + block_w if col < num_blocks - 1 else W

                window = heatmap[top:bottom, left:right]
                # Only refine windows that contain some real activation
                if window.max() > valid_thresh:
                    window = self.refine_heatmap(
                        window, ratio, valid_thresh=valid_thresh)

                # Aggregate the (possibly refined) window
                refined_sum[top:bottom, left:right] += window
                counts[top:bottom, left:right] += 1

        # Average the overlapping contributions and clip into [0, 1]
        return torch.clamp(refined_sum / counts, max=1., min=0.)

    # The candidate suppression
    def candidate_suppression(self, junctions, candidate_map, H, W):
        """ Suppress candidate segments that pass through other junctions.

        A candidate (i, j) is zeroed out of `candidate_map` (which is
        modified in place and also returned) when at least one third
        junction projects onto the segment (0 <= projection <= 1) and lies
        within `self.nms_dist_tolerance` of it orthogonally — such long
        candidates are redundant with their shorter sub-segments.

        junctions: num_junc x 2 tensor of junction coordinates.
        candidate_map: num_junc x num_junc candidate adjacency matrix;
            only the upper triangle is inspected.
        H, W: image height and width (unused here; kept for interface parity).
        """
        # Define the distance tolerance
        dist_tolerance = self.nms_dist_tolerance

        # Compute distance between junction pairs
        # (num_junc x 1 x 2) - (1 x num_junc x 2) => num_junc x num_junc map
        line_dist_map = torch.sum((torch.unsqueeze(junctions, dim=1) - junctions[None, ...]) ** 2, dim=-1) ** 0.5

        # Fetch all the "detected lines" (upper triangle, so each candidate
        # pair is processed exactly once)
        seg_indexes = torch.where(torch.triu(candidate_map, diagonal=1))
        start_point_idxs = seg_indexes[0]
        end_point_idxs = seg_indexes[1]
        start_points = junctions[start_point_idxs, :]
        end_points = junctions[end_point_idxs, :]

        # Fetch corresponding entries
        line_dists = line_dist_map[start_point_idxs, end_point_idxs]

        # Check whether they are on the line
        dir_vecs = (end_points - start_points) / torch.norm(end_points - start_points, dim=-1)[..., None]
        # Get the orthogonal distance
        cand_vecs = junctions[None, ...] - start_points.unsqueeze(dim=1)
        cand_vecs_norm = torch.norm(cand_vecs, dim=-1)
        # Check whether they are projected directly onto the segment
        proj = torch.einsum('bij,bjk->bik', cand_vecs, dir_vecs[..., None]) / line_dists[..., None, None]
        # proj is num_segs x num_junction x 1
        proj_mask = (proj >= 0) * (proj <= 1)
        # Angle between each candidate vector and the segment direction; the
        # orthogonal distance is |cand| * sin(angle). For a segment's own
        # start junction cand_vecs is zero, so the division yields NaN and
        # the distance comparison below evaluates to False.
        cand_angles = torch.acos(
            torch.einsum('bij,bjk->bik', cand_vecs, dir_vecs[..., None]) / cand_vecs_norm[..., None]
        )
        cand_dists = cand_vecs_norm[..., None] * torch.sin(cand_angles)
        junc_dist_mask = cand_dists <= dist_tolerance
        junc_mask = junc_dist_mask * proj_mask

        # Minus starting points: a segment's own endpoints must not count as
        # "intermediate" junctions
        num_segs = start_point_idxs.shape[0]
        junc_counts = torch.sum(junc_mask, dim=[1, 2])
        junc_counts -= junc_mask[..., 0][torch.arange(0, num_segs), start_point_idxs].to(torch.int)
        junc_counts -= junc_mask[..., 0][torch.arange(0, num_segs), end_point_idxs].to(torch.int)

        # Get the invalid candidate mask: any remaining on-segment junction
        # invalidates the candidate
        final_mask = junc_counts > 0
        candidate_map[start_point_idxs[final_mask], end_point_idxs[final_mask]] = 0

        # Get the final junctions
        # if junc_mask.sum() > 2:
        #     candidate_map[start_point_idx, end_point_idx] = 0
        #     candidate_map[end_point_idx, start_point_idx] = 0

        return candidate_map

    # The long segment suppression
    def line_NMS(self, junctions, line_map_pred, H, W):
        """ Long-segment non-maximum suppression.

        Iterating from the longest detected segment down, a segment (longer
        than 15 pixels) is removed when the junctions lying close to it are
        already connected among themselves by other segments — i.e. the long
        segment is redundant with a chain of shorter ones.

        junctions: num_junc x 2 tensor of junction coordinates (HW).
        line_map_pred: num_detect_thresh x num_inlier_thresh x N x N maps.
        H, W: image height and width, used to clip the search box.
        Returns an int tensor of the same shape as `line_map_pred`.
        """
        # Define the distance tolerance
        dist_tolerance = self.nms_dist_tolerance

        # Compute distance between junction pairs
        # (num_junc x 1 x 2) - (1 x num_junc x 2) => num_junc x num_junc map
        line_dist_map = torch.sum((torch.unsqueeze(junctions, dim=1) - junctions[None, ...]) ** 2, dim=-1) ** 0.5

        # Iterate through all the detect_thresh and inlier_thresh
        output_line_map = torch.zeros(line_map_pred.shape, dtype=torch.int)
        for detect_thresh_idx in range(self.num_detect_thresh):
            for inlier_thresh_idx in range(self.num_inlier_thresh):
                # Fetch the corresponding line_map => NxN
                line_map = line_map_pred[detect_thresh_idx, inlier_thresh_idx, ...]
                # TODO: Set the lower triangle part to 0??

                # Fetch all the "detected lines" (upper triangle, so each
                # segment appears exactly once)
                detected_seg_indexes = torch.where(torch.triu(line_map, diagonal=1))
                start_point_idxs = detected_seg_indexes[0]
                end_point_idxs = detected_seg_indexes[1]
                start_points = junctions[start_point_idxs, :]
                end_points = junctions[end_point_idxs, :]

                # Fetch corresponding entries
                line_dists = line_dist_map[start_point_idxs, end_point_idxs]

                # Argsort to get the order (longest segments first)
                iter_idxs = torch.argsort(line_dists, descending=True)

                # Iterate from the longest line
                for iter_idx in iter_idxs:
                    # Fetch the corresponding distance
                    line_dist = line_dists[iter_idx]
                    # Segments shorter than 15 pixels are never suppressed
                    if line_dist < 15:
                        continue
                    else:
                        # Fetch the start and end point
                        start_point = start_points[iter_idx, :]
                        end_point = end_points[iter_idx, :]
                        start_point_idx = start_point_idxs[iter_idx]
                        end_point_idx = end_point_idxs[iter_idx]
                        # Fetch the extreme point coordinates: axis-aligned
                        # box around the segment, padded by the tolerance and
                        # clipped to the image bounds
                        h_min = max(min(start_point[0], end_point[0]) - dist_tolerance, 0)
                        h_max = min(max(start_point[0], end_point[0]) + dist_tolerance, H - 1)
                        w_min = max(min(start_point[1], end_point[1]) - dist_tolerance, 0)
                        w_max = min(max(start_point[1], end_point[1]) + dist_tolerance, W - 1)

                        # Fetch the junctions within the box
                        h_mask = (junctions[:, 0] >= h_min) * (junctions[:, 0] <= h_max)
                        w_mask = (junctions[:, 1] >= w_min) * (junctions[:, 1] <= w_max)
                        junc_cand_idxs = torch.where(h_mask * w_mask)[0]
                        cand_junctions = junctions[junc_cand_idxs, :]

                        # Check whether they are on the line
                        dir_vec = (end_point - start_point) / torch.norm(end_point - start_point)
                        # Get the orthogonal distance via |cand| * sin(angle);
                        # the segment's own start point yields NaN here (zero
                        # vector) and is filtered out by the mask below
                        cand_vecs = cand_junctions - start_point
                        cand_vecs_norm = torch.norm(cand_vecs, dim=-1)
                        # Check whether they are projected directly onto the segment
                        proj = torch.matmul(cand_vecs, dir_vec) / line_dist
                        proj_mask = (proj >= 0) * (proj <= 1)
                        # import ipdb; ipdb.set_trace()
                        cand_angles = torch.acos(torch.matmul(cand_vecs, dir_vec) / cand_vecs_norm)
                        cand_dists = cand_vecs_norm * torch.sin(cand_angles)
                        junc_dist_mask = cand_dists <= dist_tolerance
                        junc_mask = junc_dist_mask * proj_mask

                        # Get the final junctions near this segment
                        junc_cand_idxs_final = junc_cand_idxs[junc_mask]
                        # import ipdb; ipdb.set_trace()

                        # Fetch the sub-array from line_pred_map restricted to
                        # the endpoints plus the nearby junctions
                        junc_cand_idxs_final = torch.unique(torch.cat(
                            [start_point_idx[None], junc_cand_idxs_final, end_point_idx[None]],
                            dim=-1
                        ))
                        sub_line_map = line_map[junc_cand_idxs_final, :]
                        sub_line_map = sub_line_map[:, junc_cand_idxs_final]
                        # Exclude the long segment itself from the sub-map
                        s = torch.where(junc_cand_idxs_final == start_point_idx)[0]
                        e = torch.where(junc_cand_idxs_final == end_point_idx)[0]
                        sub_line_map[s, e] = 0
                        sub_line_map[e, s] = 0
                        # import ipdb; ipdb.set_trace()
                        # Any remaining connection among the nearby junctions
                        # means the long segment is redundant: drop it
                        if sub_line_map.sum() > 0:
                            line_map[start_point_idx, end_point_idx] = 0
                            line_map[end_point_idx, start_point_idx] = 0
                # Write to output line map
                output_line_map[detect_thresh_idx, inlier_thresh_idx, ...] = line_map

        return output_line_map

    # The endpoint perturbation refinement
    def refine_junction_perturb(self, junctions, line_map_pred, heatmap, H, W, device):
        """ Refine detected segments by perturbing both endpoints on a small
        grid and keeping, per segment, the perturbation whose sampled
        heatmap activation (mean along the segment) is highest.

        Returns the refined junctions (duplicates merged) and the rebuilt
        line map with shape 1 x 1 x N' x N'.
        Raises ValueError when more than one detect/inlier threshold is
        configured (parameter-search mode is unsupported).
        """
        # Get the config
        junction_refine_cfg = self.junction_refine_cfg

        # Assert that we are not in parameter search mode
        if (not self.num_detect_thresh == 1) or (not self.num_inlier_thresh == 1):
            raise ValueError("[Error] This junction refinement method is not supported in parameter search mode...")

        # Fetch refinement parameters
        num_perturbs = junction_refine_cfg["num_perturbs"]
        perturb_interval = junction_refine_cfg["perturb_interval"]
        side_perturbs = (num_perturbs - 1) // 2
        # Fetch the 2D perturb mat: symmetric offsets
        # [-interval*side, ..., 0, ..., +interval*side]
        perturb_vec = torch.arange(
            start=-perturb_interval * side_perturbs,
            end=perturb_interval * (side_perturbs + 1),
            step=perturb_interval, device=device
        )
        # NOTE(review): the grid names suggest (w, h) ordering while the
        # junctions are in HW format; because the same symmetric offset
        # vector is used in all four dimensions, the resulting candidate set
        # is unaffected — confirm if the interval ever becomes anisotropic.
        w1_grid, h1_grid, w2_grid, h2_grid = torch.meshgrid(
            perturb_vec, perturb_vec, perturb_vec, perturb_vec
        )
        perturb_tensor = torch.cat([
            w1_grid[..., None], h1_grid[..., None],
            w2_grid[..., None], h2_grid[..., None]],
            dim=-1)
        # (num_perturbs ** 4) x 2 endpoints x 2 coordinates
        perturb_tensor_flat = perturb_tensor.view(-1, 2, 2)

        # Fetch the junctions and line_map
        junctions = junctions.clone()
        line_map = line_map_pred[0, 0, ...]

        # Fetch all the detected lines (upper triangle => unique segments)
        detected_seg_indexes = torch.where(torch.triu(line_map, diagonal=1))
        start_point_idxs = detected_seg_indexes[0]
        end_point_idxs = detected_seg_indexes[1]
        start_points = junctions[start_point_idxs, :]
        end_points = junctions[end_point_idxs, :]

        # num_segments x 2 endpoints x 2 coordinates
        line_segments = torch.cat([
            start_points.unsqueeze(dim=1),
            end_points.unsqueeze(dim=1)
        ], dim=1)

        # num_segments x num_perturbs**4 x 2 x 2 candidate segments
        line_segment_candidates = line_segments.unsqueeze(dim=1) + perturb_tensor_flat[None, ...]
        # Clip to the image boundaries
        line_segment_candidates[..., 0] = line_segment_candidates[..., 0].clamp(min=0, max=H - 1)
        line_segment_candidates[..., 1] = line_segment_candidates[..., 1].clamp(min=0, max=W - 1)

        # Iterate through all the segments
        refined_segment_lst = []
        num_segments = line_segments.shape[0]
        for idx in range(num_segments):
            segment = line_segment_candidates[idx, ...]
            # Get the corresponding start and end junctions
            candidate_junc_start = segment[:, 0, :]
            candidate_junc_end = segment[:, 1, :]

            # Get the sampling locations (N x 64): linear interpolation
            # between the endpoints, weighted by self.torch_sampler
            sampler = self.torch_sampler.to(device)[None, ...]
            cand_samples_h = candidate_junc_start[:, 0:1] * sampler + \
                             candidate_junc_end[:, 0:1] * (1 - sampler)
            cand_samples_w = candidate_junc_start[:, 1:2] * sampler + \
                             candidate_junc_end[:, 1:2] * (1 - sampler)

            # Clip to image boundary
            cand_h = torch.clamp(cand_samples_h, min=0, max=H - 1)
            cand_w = torch.clamp(cand_samples_w, min=0, max=W - 1)

            # Perform bilinear sampling of the heatmap along each candidate
            segment_feat = self.detect_bilinear(
                heatmap,
                cand_h, cand_w, H, W, device
            )
            # Keep the perturbation with the highest mean activation
            segment_results = torch.mean(segment_feat, dim=-1)
            max_idx = torch.argmax(segment_results)
            refined_segment_lst.append(segment[max_idx, ...][None, ...])
        # Concatenate back to segments
        refined_segments = torch.cat(refined_segment_lst, dim=0)
        # import ipdb; ipdb.set_trace()

        # Convert back to junctions and line_map, merging duplicate endpoints
        junctions_new = torch.cat(
            [refined_segments[:, 0, :],
             refined_segments[:, 1, :]],
            dim=0
        )
        junctions_new = torch.unique(junctions_new, dim=0)
        line_map_new = self.segments_to_line_map(junctions_new, refined_segments)

        return junctions_new, line_map_new[None, None, ...]

    def segments_to_line_map(self, junctions, segments):
        """ Convert a set of segments back into an adjacency line map over
        the given junctions (1 where a segment connects two junctions). """
        device = junctions.device
        num_junctions = junctions.shape[0]
        line_map = torch.zeros([num_junctions, num_junctions], device=device)

        # Mark each segment's endpoint pair in the adjacency matrix
        for seg_idx in range(segments.shape[0]):
            endpoint1 = segments[seg_idx, 0, :]
            endpoint2 = segments[seg_idx, 1, :]

            # Find each endpoint in the junction list (both coords must match)
            match1 = torch.where((junctions == endpoint1).sum(axis=1) == 2)[0]
            match2 = torch.where((junctions == endpoint2).sum(axis=1) == 2)[0]

            # Label the entry symmetrically
            line_map[match1, match2] = 1
            line_map[match2, match1] = 1

        return line_map

    # Detection by bilinear sampling
    def detect_bilinear(self, heatmap, cand_h, cand_w, H, W, device):
        """ Sample `heatmap` at real-valued locations with bilinear
        interpolation.

        heatmap: H x W tensor of activations.
        cand_h, cand_w: tensors of row / column sample coordinates,
            expected to lie within [0, H-1] / [0, W-1].
        H, W: image height and width (used to clamp the upper corner).
        device: unused here; kept for interface parity with detect_local_max.
        Returns a tensor of sampled values with the same shape as cand_h.
        """
        # Lower integer corner and fractional offset inside the cell.
        # The upper corner is floor + 1 (clamped to the image) rather than
        # torch.ceil: with torch.ceil, an exactly-integer coordinate makes
        # ceil == floor, so all four bilinear weights vanish and the sample
        # incorrectly evaluates to 0 instead of the pixel value.
        cand_h_floor = torch.floor(cand_h)
        cand_w_floor = torch.floor(cand_w)
        frac_h = cand_h - cand_h_floor
        frac_w = cand_w - cand_w_floor
        h0 = cand_h_floor.to(torch.long)
        w0 = cand_w_floor.to(torch.long)
        h1 = torch.clamp(h0 + 1, max=H - 1)
        w1 = torch.clamp(w0 + 1, max=W - 1)

        # Standard bilinear weighting of the four neighboring pixels
        cand_samples_feat = (
                heatmap[h0, w0] * (1 - frac_h) * (1 - frac_w) +
                heatmap[h0, w1] * (1 - frac_h) * frac_w +
                heatmap[h1, w0] * frac_h * (1 - frac_w) +
                heatmap[h1, w1] * frac_h * frac_w
        )

        return cand_samples_feat

    # Detection by local maximum search
    def detect_local_max(self, heatmap, cand_h, cand_w, H, W, normalized_seg_length, device):
        """ Sample the heatmap at (cand_h, cand_w) by taking the maximum
        activation inside a local circular patch whose radius grows with
        the normalized segment length. """
        # Per-segment distance threshold, broadcast to every sample (N x 64)
        dist_thresh = 0.5 * (2 ** 0.5) + self.lambda_radius * normalized_seg_length  # min_dist + 3. * noramlize
        dist_thresh = torch.repeat_interleave(dist_thresh[..., None], self.num_samples, dim=-1)

        # Real-valued sample locations and their nearest integer pixels
        cand_points = torch.cat([cand_h[..., None], cand_w[..., None]], dim=-1)
        cand_points_round = torch.round(cand_points)  # N x 64 x 2

        # Enumerate integer offsets in a square patch, keep only those inside
        # the circular radius, and shift them so [0, 0] is the center
        patch_size = int(2 * self.local_patch_radius + 1)
        patch_mask = torch.zeros([patch_size, patch_size], device=device)
        patch_center = torch.tensor([[self.local_patch_radius, self.local_patch_radius]],
                                    device=device, dtype=torch.float32)
        patch_rows, patch_cols = torch.where(patch_mask >= 0)
        patch_points = torch.cat([patch_rows[..., None], patch_cols[..., None]], dim=-1)
        center_dist = torch.sqrt(torch.sum((patch_points - patch_center) ** 2, dim=-1))
        patch_points = patch_points[center_dist <= self.local_patch_radius, :]
        patch_points = patch_points - self.local_patch_radius

        # Patch points around every rounded sample, masked by the true
        # (sub-pixel) distance to the exact sample location
        shifted_points = torch.unsqueeze(cand_points_round, dim=2) + patch_points[None, None, ...]
        true_dist = torch.sqrt(torch.sum((torch.unsqueeze(cand_points, dim=2) - shifted_points) ** 2, dim=-1))
        valid_mask = true_dist < dist_thresh[..., None]

        # Clamp the patch coordinates into the image before indexing
        rows_clamped = torch.clamp(shifted_points[:, :, :, 0], min=0, max=H - 1).to(torch.long)
        cols_clamped = torch.clamp(shifted_points[:, :, :, 1], min=0, max=W - 1).to(torch.long)

        # Gather the heatmap (N x 64 x num_patch_points), zero out invalid
        # entries, and keep the per-sample maximum
        patch_feat = heatmap[rows_clamped, cols_clamped] * valid_mask.to(torch.float32)
        patch_max, _ = torch.max(patch_feat, dim=-1)

        return patch_max

    # Recover junctions from the boundary
    def detect_junctions_on_boundary(self, heatmap, band_width=1,
                                     low_thresh=0.15, high_thresh=0.2,
                                     grid_size=8):
        """
        Detect junctions lying on the image boundary via a 1D soft-NMS over
        the unrolled border band.

        heatmap: H*W*1 or H*W prob map
        band_width: width of the band on the boundary to consider (now only 1 is supported)
        low_thresh: directly discard all the pixels below this threshold
        high_thresh: a pixel need to have prob higher than this to be considered as a junction to keep.
        grid_size: the window width of the NMS
        """
        if not band_width == 1:
            raise ValueError("[Error] Currently we only support band_width=1 version.")

        if len(heatmap.shape) == 3:
            heatmap = heatmap.squeeze()

        height, width = heatmap.shape

        # Per-pixel (h, w) coordinate map so the unrolled band can be mapped
        # back to 2D junction positions
        H_mat, W_mat = np.meshgrid(np.arange(0, height, 1),
                                   np.arange(0, width, 1), indexing="ij")
        index_map = np.concatenate([H_mat[..., None], W_mat[..., None]], axis=-1)

        # Unroll the four borders clockwise (band_width=1); each corner
        # pixel appears in exactly one band
        bands = [
            (heatmap[0, :width - 1], index_map[0, :width - 1, :]),
            (heatmap[:height - 1, width - 1], index_map[:height - 1, width - 1, :]),
            (heatmap[height - 1, -1:0:-1], index_map[height - 1, -1:0:-1, :]),
            (heatmap[-1:0:-1, 0], index_map[-1:0:-1, 0, :]),
        ]
        full_band = np.concatenate([values for values, _ in bands])
        full_index = np.concatenate([coords for _, coords in bands], axis=0)

        # Drop clearly-background pixels up front
        full_band[full_band < low_thresh] = 0.

        # Each band position owns the 1D interval [pos - grid, pos + grid]
        positions = np.where(full_band > -1)[0]
        start_points = np.clip(positions - grid_size, 0, full_band.shape[0])
        end_points = np.clip(positions + grid_size, 0, full_band.shape[0])
        # print(start_points.shape)

        # Pre-compute the interval lengths
        areas = np.maximum(0.0, end_points - start_points)

        # Working copy of the confidences
        scores = full_band.copy()

        # Soft-NMS: repeatedly keep the strongest position and decay the
        # scores of overlapping intervals by (1 - overlap ratio)
        nms_lst = []
        while scores.max() >= high_thresh:
            order = np.argsort(scores)[::-1]
            best = order[0]
            rest = order[1:]

            # Interval overlap between the winner and everything else
            overlap_starts = np.maximum(start_points[best], start_points[rest])
            overlap_ends = np.minimum(end_points[best], end_points[rest])
            intersects = np.maximum(0.0, overlap_ends - overlap_starts + 1)
            unions = areas[best] + areas[rest] - intersects

            # Suppress the scores by overlapping ratio
            scores[rest] = (1.0 - intersects / unions) * scores[rest]

            nms_lst.append(best)
            scores[best] = 0.

        # Map the kept band positions back to (h, w) junctions
        return full_index[nms_lst, :]