"""
Version 2
Convert the aggregation results from the homography adaptation to gt labels
(1) Using fixed line detection parameters
(2) Using line NMS operation
"""
import numpy as np
import cv2
import h5py
# from evaluation.line_detect_eval import evaluate_mAPH_simple, evaluate_sAP
from model.line_detection_val import SimpleLineDetector, SimpleLineDetector_LCNN, SimpleLineDetector_LCNN2
from dataset.dataset_util import get_dataset
import yaml
from config.project_config import Config as cfg
import os
from model.line_detection_val import SimpleLineDetector_LCNN
from utils import parse_h5_data
from evaluation.eval_utils import convert_raw_exported_predictions, line_map_to_segments
from dataset.synthetic_util import get_line_heatmap, get_unique_junctions
import torch
from scipy.ndimage import label
from tqdm import tqdm


# Suppress local overlapping small line segments and (optionally) refine the junctions
def line_NMS(line_map, junctions, image_size):
    """
    Inputs:
        line_map: N*N np array
        junctions: N*2 np array
    """
    device = torch.device("cpu")
    # Convert line_map to line segments
    line_segments = line_map_to_segments(junctions, line_map)

    start_points = line_segments[:, 0, :]
    end_points = line_segments[:, 1, :]
    line_segments_length = np.linalg.norm(start_points - end_points, axis=-1)
    iter_idxs = np.argsort(line_segments_length)

    # Generate the heatmaps
    heatmap = np.zeros(image_size)
    for idx in iter_idxs:
        point1 = np.flip(start_points[idx, :]).astype(np.int)  # xy format
        point2 = np.flip(end_points[idx, :]).astype(np.int)  # xy format
        length = line_segments_length[idx]

        heatmap_tmp = np.zeros(image_size)
        cv2.line(heatmap_tmp, tuple(point1), tuple(point2), 1., thickness=1)
        heatmap_tmp *= length

        heatmap = np.concatenate([heatmap[..., None], heatmap_tmp[..., None]], axis=-1)
        heatmap = np.max(heatmap, axis=-1)

    # Perform nms
    angle_range = 5.
    nms_radius = 3.
    line_dir_vecs = (end_points - start_points) / line_segments_length[..., None]

    # Construct local patches 9x9 = 81
    patch_mask = torch.zeros([int(2 * nms_radius + 1),
                              int(2 * nms_radius + 1)], device=device)
    patch_center = torch.tensor([[nms_radius, nms_radius]],
                                device=device, dtype=torch.float32)
    H_patch_points, W_patch_points = torch.where(patch_mask >= 0)
    patch_points = torch.cat([H_patch_points[..., None], W_patch_points[..., None]], dim=-1)
    # Fetch the circle region
    patch_center_dist = torch.sqrt(torch.sum((patch_points - patch_center) ** 2, dim=-1))
    patch_points = patch_points[patch_center_dist <= nms_radius, :]
    # Shift [0, 0] to the center
    patch_points = patch_points - nms_radius

    # Convert some array to tensors
    heatmap = torch.tensor(heatmap, dtype=torch.float64, device=device)
    line_segments_length = torch.tensor(line_segments_length, dtype=torch.float64, device=device)

    # Start from the longest line
    iter_idxs = iter_idxs[::-1]
    for idx in iter_idxs:
        # Fetch all point belong to the line
        line_points = torch.where(heatmap == line_segments_length[idx])
        line_points = torch.cat([
            line_points[0][..., None],
            line_points[1][..., None]
        ], dim=-1
        )

        # Get all possible suppresion target
        line_dir_ref = line_dir_vecs[idx, :]
        angles = np.arccos(
            np.clip(np.abs(np.dot(line_dir_vecs, line_dir_ref)), a_min=0., a_max=1.)
        ) * 360 / (2 * np.pi)
        cand_idxs = np.where(angles < angle_range)[0]
        cand_idxs = cand_idxs[line_segments_length[cand_idxs] < line_segments_length[idx]]

        cand_values = line_segments_length[cand_idxs]

        # Construct local patch mask
        patch_points_shifted = torch.unsqueeze(line_points, dim=1) + patch_points[None, ...]
        patch_dist = torch.sqrt(torch.sum((torch.unsqueeze(line_points, dim=1) - patch_points_shifted) ** 2, dim=-1))
        patch_dist_mask = patch_dist < nms_radius

        # Get all points => num_points_center x num_patch_points x 2
        points_H = torch.clamp(patch_points_shifted[:, :, 0], min=0, max=image_size[0] - 1).to(torch.long)
        points_W = torch.clamp(patch_points_shifted[:, :, 1], min=0, max=image_size[1] - 1).to(torch.long)
        points = torch.cat([points_H[..., None], points_W[..., None]], dim=-1)

        # Sample the feature (N x 64 x 81)
        sampled_feat = heatmap[points[:, :, 0], points[:, :, 1]]
        # Filtering using the valid mask
        sampled_feat = sampled_feat * patch_dist_mask.to(torch.float32)

        # Iteratively suppress each value
        # import ipdb; ipdb.set_trace()
        # print(cand_values.shape)
        # print(cand_values.dim())
        if cand_values.dim() > 0:
            # print(cand_values.shape)
            for cand_val in cand_values:
                suppress_mask = sampled_feat == cand_val
                # Fetch the suppression points
                suppress_points = points[suppress_mask, :]

                # Nothing to suppress
                if 0 in points[suppress_mask, :].shape:
                    continue
                else:
                    heatmap[suppress_points[:, 0], suppress_points[:, 1]] = 0.

    # TODO: Re-assign junctions to the dominating line

    return heatmap, junctions


# Suppress local overlapping small line segments and (optionally) refine the junctions
# Suppress local overlapping small line segments and (optionally) refine the junctions
def line_NMS_v2(line_map, junctions, image_size, device=torch.device("cuda")):
    """Vectorized line NMS: suppress shorter near-parallel segments and
    extract the surviving pixel runs as auxiliary line segments.

    Inputs:
        line_map: N*N np array (junction adjacency matrix).
        junctions: N*2 np array of junction coordinates (HW convention assumed
            from the np.flip to xy below — confirm against caller).
        image_size: [H, W] used to rasterize the segments.
        device: torch device used for the distance computations.
    Returns:
        junctions: the input junctions, unchanged.
        auxiliary_junctions: unique endpoints of the surviving fragments
            (from get_unique_junctions).
        auxiliary_line_map: adjacency map over auxiliary_junctions.
    """
    # Convert line_map to line segments
    line_segments = line_map_to_segments(junctions, line_map)

    start_points = line_segments[:, 0, :]
    end_points = line_segments[:, 1, :]
    line_segments_length = np.linalg.norm(start_points - end_points, axis=-1)
    iter_idxs = np.argsort(line_segments_length)

    # Rasterize each segment and collect its pixel coordinates (longest first)
    line_points_lst = []
    line_length_lst = []
    line_point_num_lst = []
    for idx in iter_idxs[::-1]:
        # np.int was removed in NumPy 1.24+; the builtin int is equivalent.
        point1 = np.flip(start_points[idx, :]).astype(int)  # xy format
        point2 = np.flip(end_points[idx, :]).astype(int)  # xy format
        length = line_segments_length[idx]

        heatmap_tmp = np.zeros(image_size)
        cv2.line(heatmap_tmp, tuple(point1), tuple(point2), 1., thickness=1)

        line_points = np.where(heatmap_tmp > 0.)
        line_points = np.concatenate(
            [line_points[0][..., None],
             line_points[1][..., None]],
            axis=-1
        )
        num_points = line_points.shape[0]
        line_points_lst.append(line_points)
        line_point_num_lst.append(num_points)
        line_length_lst.append(length)

    # Pad the per-segment point lists into a regular [num_segments, max_pts, 2]
    # tensor; padded slots are filled with -1.
    max_num_points = max(line_point_num_lst)
    num_segments = line_segments.shape[0]
    line_points = -1 * torch.ones(
        [num_segments, max_num_points, 2],
        dtype=torch.float32, device=device
    )
    # Fill the tensor with corresponding entries
    for idx in range(len(line_points_lst)):
        line_points[idx, :line_point_num_lst[idx], :] = \
            torch.tensor(line_points_lst[idx], dtype=torch.float32, device=device)

    # Get the mask (the padded entries with -1 are 0.)
    line_points_mask = (line_points >= 0.).to(torch.float32)

    # Perform nms
    angle_range = 5.   # max angular difference (degrees) to count as parallel
    nms_radius = 3.    # suppression radius in pixels
    line_dir_vecs = (end_points - start_points) / line_segments_length[..., None]
    # Re-sort according to segment lengths (descending, matching the tensors above)
    line_dir_vecs = line_dir_vecs[iter_idxs[::-1], :]
    line_segments_length = line_segments_length[iter_idxs[::-1]]

    # BUGFIX: allocate on `device` so that indexing with the (possibly CUDA)
    # cand_idxs tensor below does not mix devices.
    valid_line_mask = torch.ones([num_segments], dtype=torch.bool, device=device)
    # Start from the longest line
    for idx in range(num_segments):
        # Get all possible suppression targets: shorter, near-parallel segments
        line_dir_ref = line_dir_vecs[idx, :]
        angles = np.arccos(
            np.clip(np.abs(np.dot(line_dir_vecs, line_dir_ref)), a_min=0., a_max=1.)
        ) * 360 / (2 * np.pi)
        cand_idxs = np.where(angles < angle_range)[0]
        cand_idxs = cand_idxs[line_segments_length[cand_idxs] < line_segments_length[idx]]
        cand_idxs = torch.tensor(cand_idxs, dtype=torch.long, device=device)
        # Only keep candidates that still have surviving pixels
        cand_idxs = cand_idxs[valid_line_mask[cand_idxs]]
        if len(cand_idxs) == 0:
            continue

        # Fetch the reference pixels of the current line that are still alive
        ref_mask = torch.sum(line_points_mask[idx, ...], dim=-1) > 0.
        ref_line_points = line_points[idx, ref_mask, :]  # N x 2
        if 0 in ref_line_points.shape:
            continue

        # Fetch points in cand_idxs
        cand_line_points = line_points[cand_idxs, ...]  # num_cand x N x 2
        cand_line_points_mask = line_points_mask[cand_idxs, ...].clone()  # num_cand x N x 2

        # Distance from each candidate pixel to its nearest reference pixel
        dists = torch.norm(
            cand_line_points.unsqueeze(dim=2) - ref_line_points[None, None, ...],
            dim=-1
        )
        dists = torch.min(dists, dim=2)[0]
        suppress_mask = dists <= nms_radius
        cand_line_points_mask[suppress_mask, :] = 0.

        # Assign the info back to the original mask
        line_points_mask[cand_idxs, ...] = cand_line_points_mask * line_points_mask[cand_idxs, ...]

        # A segment stays valid while it has at least one surviving pixel
        valid_line_mask = torch.sum(line_points_mask, dim=[-1, -2]) > 0

    # Collapse the per-coordinate mask to a per-pixel mask
    line_points_mask = torch.sum(line_points_mask, dim=-1) > 0.
    line_points_mask = line_points_mask.cpu().numpy()

    auxiliary_segment_lst = []
    for idx in range(line_points_mask.shape[0]):
        line_mask = line_points_mask[idx, :]

        # If no pixel of this segment survived, just continue
        if line_mask.sum() <= 0:
            continue

        # Identify the overall direction of the line
        dir_vec = line_dir_vecs[idx, ...]

        # Extract the connected components (contiguous surviving fragments)
        comp_mask, num_comp = label(line_mask)
        # Iterate through each component
        for i in range(num_comp):
            comp_idx = i + 1
            mask = comp_mask == comp_idx
            masked_indexes = np.where(mask)[0]

            # If too few elements, then throw it away...
            if len(masked_indexes) <= 4:
                continue

            # Check the horizontal and vertical extreme points
            masked_points = line_points[idx, masked_indexes, :]
            h_min = masked_points[:, 0].min()[None, ...]
            h_max = masked_points[:, 0].max()[None, ...]
            w_min = masked_points[:, 1].min()[None, ...]
            w_max = masked_points[:, 1].max()[None, ...]

            # Pick the endpoint pairing consistent with the segment direction.
            # H and W of the end_point are larger.
            if dir_vec[0] >= 0 and dir_vec[1] >= 0:
                start_point = torch.cat([h_min, w_min], dim=0)
                end_point = torch.cat([h_max, w_max], dim=0)
            # H higher, w lower
            elif dir_vec[0] >= 0 and dir_vec[1] < 0:
                start_point = torch.cat([h_min, w_max], dim=0)
                end_point = torch.cat([h_max, w_min], dim=0)
            # H lower, w higher
            elif dir_vec[0] < 0 and dir_vec[1] >= 0:
                start_point = torch.cat([h_max, w_min], dim=0)
                end_point = torch.cat([h_min, w_max], dim=0)
            # H lower, w lower
            else:
                start_point = torch.cat([h_max, w_max], dim=0)
                end_point = torch.cat([h_min, w_min], dim=0)
            auxiliary_segment_lst.append(
                torch.cat([start_point[None, ...], end_point[None, ...]], dim=0)[None, ...]
            )

    # BUGFIX: torch.cat raises on an empty list; return empty results instead
    # when every fragment was suppressed or too short.
    if len(auxiliary_segment_lst) == 0:
        return junctions, np.zeros([0, 2]), np.zeros([0, 0])
    auxiliary_segments = torch.cat(auxiliary_segment_lst, dim=0)

    # Convert to Nx4 for now
    auxiliary_segments2 = torch.cat(
        [auxiliary_segments[:, 0, :], auxiliary_segments[:, 1, :]], dim=-1
    ).cpu().numpy()
    auxiliary_junctions, auxiliary_line_map = get_unique_junctions(auxiliary_segments2, 1)

    return junctions, auxiliary_junctions, auxiliary_line_map


if __name__ == "__main__":
    # Define the path to the exported dataset
    exported_dataset_path = "wireframe_train_homograpy-export_epoch045_ce1_consist_round0_v1.5.h5"
    exported_dataset_path = os.path.join(cfg.export_dataroot, exported_dataset_path)

    # Specify the output path for the results
    output_dataset_path = "wireframe_train_adaptation_iter0_epoch045_ce1_consist_detect_0.28_inlier_0.85_local_max_v1.5_refine.h5"
    output_dataset_path = os.path.join(cfg.export_dataroot, output_dataset_path)

    # Devices for detection and for the NMS step (both hard-coded to CUDA).
    device = torch.device("cuda")
    nms_device = torch.device("cuda")

    # Fixed line detection parameters (module docstring, item (1)).
    detect_cfg = {
        "detect_thresh": 0.28,
        "num_samples": 64,
        "sampling_method": "local_max",
        "inlier_thresh": 0.85,
        "max_local_patch_radius": 3,
        "lambda_radius": 2,
        "use_long_line_nms": False,
        "use_candidate_suppression": False,
        "nms_dist_tolerance": 3.
    }
    line_detector = SimpleLineDetector_LCNN2(**detect_cfg)

    # Get the model configuration
    with open("config/lcnn_dynamic_config_new.yaml", "r") as f:
        model_cfg = yaml.safe_load(f)

    # BUGFIX: the input file handle was never closed; manage both files with
    # a single `with` so they are released even on error.
    with h5py.File(exported_dataset_path, "r") as exported_dataset, \
            h5py.File(output_dataset_path, "w") as output_dataset:
        # Iterate through all the dataset keys
        for idx, output_key in enumerate(tqdm(list(exported_dataset.keys()), ascii=True)):
            # Get the data
            data = parse_h5_data(exported_dataset[output_key])

            # Preprocess the raw exported predictions
            converted_data = convert_raw_exported_predictions(
                data, grid_size=model_cfg["grid_size"],
                detect_thresh=model_cfg["detection_thresh"]
            )
            junctions_pred_raw = converted_data["junctions_pred"]
            heatmap_pred = converted_data["heatmap_pred"]
            valid_mask = converted_data["valid_mask"]

            # Detect line segments from the aggregated predictions
            line_map_pred, junctions_pred = line_detector.detect_torch(
                junctions_pred_raw, heatmap_pred,
                recover_junction=False,
                filter_heatmap=True,
                device=device
            )
            if isinstance(line_map_pred, torch.Tensor):
                line_map_pred = line_map_pred.cpu().numpy()
            if isinstance(junctions_pred, torch.Tensor):
                junctions_pred = junctions_pred.cpu().numpy()

            # Apply the line NMS (module docstring, item (2)). The previously
            # unused `heatmap`/`heatmap_debug` renders were dead debug work
            # and have been removed.
            junctions, aux_junctions, aux_line_map = line_NMS_v2(
                line_map_pred,
                junctions_pred,
                image_size=[512, 512],
                device=nms_device
            )

            output_data = {
                "junctions": junctions,
                "line_map": line_map_pred,
                "aux_junctions": aux_junctions,
                "aux_line_map": aux_line_map
            }

            # Record it to the h5 dataset
            f_group = output_dataset.create_group(output_key)

            # BUGFIX: the loop variable previously shadowed and rebound
            # `output_data` while iterating it; use a distinct name.
            for key, value in output_data.items():
                f_group.create_dataset(key, data=value, compression="gzip")