import time
import torch
import numpy as np
import kornia as K
import kornia.feature as KF
from semantic_nav.slam_utils import (numpy_to_o3d_pcd,
                                     opencv_to_pillow,
                                     recover_pose_from_matches,
                                     preprocess_point_cloud,
                                     registration_ransac_correspondence,
                                     registration_icp,
                                     draw_geometries)
from semantic_nav.torch_utils import (opencv_to_kornia,
                                      resize_image,
                                      feature_matching,
                                      display_matches,
                                      torch_knn,
                                      ImageEmbedding)
from semantic_nav.topomap_utils import TopoMap
from semantic_nav.log_utils import get_logger
logger = get_logger()


class FeatureMatcher:
    """Coarse RGB-D pair registration using LoFTR feature matching.

    Matched pixels between the two color images are back-projected to 3D
    camera coordinates using the depth maps, and a rigid transform is
    estimated from the 3D-3D correspondences via RANSAC.
    """

    def __init__(self, max_corre_num=1000, desired_size=(640, 480), device=None):
        """
        Args:
            max_corre_num: reserved for capping the number of correspondences;
                currently unused.
            desired_size: (width, height) images are resized to before matching.
            device: torch device for the LoFTR model. ``None`` selects the best
                available device (CUDA/MPS/CPU). Resolved here rather than in
                the default argument so the device query runs at construction
                time, not at module-import time.
        """
        if device is None:
            device = K.utils.get_cuda_or_mps_device_if_available()
        self.device = device
        self.desired_size = desired_size
        self.max_corre_num = max_corre_num  # kept for interface compatibility
        self.matcher = KF.LoFTR(pretrained="outdoor").to(device)

    def registration_global(self, color_depth0, color_depth1, camera_intrinsic, debug=False):
        """Estimate a coarse rigid transform between two RGB-D frames.

        Args:
            color_depth0: (color_img, depth_img) of the source frame, OpenCV layout.
            color_depth1: (color_img, depth_img) of the target frame, OpenCV layout.
            camera_intrinsic: 3x3 pinhole intrinsic matrix at the ORIGINAL image size.
            debug: if True, visualize the feature matches.

        Returns:
            The RANSAC-estimated transform from ``registration_ransac_correspondence``,
            or ``None`` when fewer than 8 matches are found.
        """
        color_img0, depth_img0 = color_depth0
        color_img1, depth_img1 = color_depth1
        # Scale the intrinsics to the resized resolution: row 0 (fx, 0, cx) is
        # multiplied by the x factor, row 1 (0, fy, cy) by the y factor,
        # row 2 (0, 0, 1) is left unchanged via the trailing 1.0.
        resize_factor = (float(self.desired_size[0]) / float(color_img0.shape[1]),
                         float(self.desired_size[1]) / float(color_img0.shape[0]))
        camera_intrinsic = camera_intrinsic * np.array(resize_factor + (1.0,)).reshape(3, 1)
        # Convert images from OpenCV layout to kornia tensors on the device.
        color_img0 = opencv_to_kornia(color_img0).to(self.device)
        color_img1 = opencv_to_kornia(color_img1).to(self.device)
        # Resize everything to the common matching resolution.
        color_img0 = resize_image(color_img0, *self.desired_size)
        color_img1 = resize_image(color_img1, *self.desired_size)
        depth_img0 = resize_image(depth_img0, *self.desired_size)
        depth_img1 = resize_image(depth_img1, *self.desired_size)

        mkpts0, mkpts1 = feature_matching(self.matcher, color_img0, color_img1)

        if debug:
            display_matches(color_img0, color_img1, mkpts0, mkpts1)

        # Need a minimum number of correspondences for a meaningful estimate.
        if mkpts0.shape[0] < 8:
            logger.error("Not enough matches found")
            return None
        mkpts0 = mkpts0.cpu().numpy()
        mkpts1 = mkpts1.cpu().numpy()

        # NOTE: essential-matrix pose recovery (recover_pose_from_matches) was
        # previously used here to prune outlier matches before back-projection;
        # it is currently disabled in favor of RANSAC on the 3D correspondences.

        # Look up per-match depth at the rounded pixel positions; clip so that
        # rounding at the image border cannot index one past the last pixel.
        max_xy = np.array([self.desired_size[0] - 1, self.desired_size[1] - 1])
        mkpts0_int = np.clip(np.round(mkpts0).astype(np.int32), 0, max_xy)
        mkpts1_int = np.clip(np.round(mkpts1).astype(np.int32), 0, max_xy)
        depth0 = depth_img0[mkpts0_int[:, 1], mkpts0_int[:, 0]]
        depth1 = depth_img1[mkpts1_int[:, 1], mkpts1_int[:, 0]]

        # Homogeneous pixel coordinates -> normalized camera rays -> 3D points.
        ones_colum = np.ones((mkpts0.shape[0], 1))
        mkpts0 = np.hstack((mkpts0, ones_colum))
        mkpts1 = np.hstack((mkpts1, ones_colum))
        # Row-vector form of K^-1 @ pix: pix_row @ inv(K.T) == (inv(K) @ pix).T
        pixel2norm_coord = np.linalg.inv(camera_intrinsic.T)
        norm_coord0 = mkpts0 @ pixel2norm_coord
        norm_coord1 = mkpts1 @ pixel2norm_coord
        points0 = norm_coord0 * depth0.reshape(-1, 1)
        points1 = norm_coord1 * depth1.reshape(-1, 1)

        src_pcd = numpy_to_o3d_pcd(points0)
        tgt_pcd = numpy_to_o3d_pcd(points1)
        T_ransac = registration_ransac_correspondence(src_pcd, tgt_pcd, inlier_threshold=0.2)
        return T_ransac


class TopoMapLocator(TopoMap):
    """Localizes observations within a topological map.

    Coarse place recognition finds the nearest map node by comparing global
    image embeddings; LoFTR-based registration followed by colored ICP then
    estimates the metric pose inside that node.
    """

    def __init__(self, path, dist_thre=30.0, device=None):
        """
        Args:
            path: map path forwarded to the ``TopoMap`` base class.
            dist_thre: L2 embedding-distance threshold above which the current
                observation is considered a new node.
            device: torch device; ``None`` selects the best available one
                (CUDA/MPS/CPU). Resolved here rather than in the default
                argument so the device query runs at construction time, not
                at module-import time.
        """
        super().__init__(path)
        if device is None:
            device = K.utils.get_cuda_or_mps_device_if_available()
        self.device = device
        self.matcher = FeatureMatcher(desired_size=(960, 540), device=device)
        self.time_stamp = None   # timestamp of the latest localization
        self.node_now = None     # index of the current node
        self.node_last = None    # node index from the previous localization
        self.T_in_node = None    # refined pose within the current node
        self.database = None     # embedding matrix, one row per node
        self.dist_thre = dist_thre
        self.embedding = ImageEmbedding()
        self.embedding_nodes()

    def embedding_nodes(self):
        """Build the embedding database from the stored node observations.

        Nodes are processed in ascending node-index order so that database
        row i corresponds to node i. Leaves ``self.database`` as ``None``
        when the map contains no observations.
        """
        observations_dict = self.load_observations()
        if len(observations_dict) < 1:
            return
        sorted_observations = sorted(observations_dict.items(), key=lambda x: x[0])
        # First image of each node's first observation is used as the node's
        # representative view.
        images = [observations[0][0] for node_idx, observations in sorted_observations]
        torch_images = [self.embedding.preprocess(opencv_to_pillow(image)) for image in images]
        torch_images = torch.cat(torch_images, dim=0)
        self.database = self.embedding(torch_images).contiguous()

    def get_location(self):
        """Return the latest (time_stamp, node index, pose-in-node) triple."""
        return self.time_stamp, self.node_now, self.T_in_node

    def locate(self, observations, camera_intrinsics, this_pose, time_stamp, debug=False):
        """Localize an observation: find the nearest node, then the pose in it.

        Returns:
            (time_stamp, node index, refined transform or None).
        """
        self.time_stamp = time_stamp
        self.node_now = self.locate_node(observations, this_pose)
        return self.time_stamp, self.node_now, self.locate_in_node(observations, camera_intrinsics, debug=debug)

    def locate_in_node(self, observations, camera_intrinsics, debug=False):
        """Estimate the metric pose relative to the current node.

        Runs coarse LoFTR/RANSAC registration against the node's stored frame,
        then refines with colored ICP. Stores the result in ``self.T_in_node``.

        Returns:
            The refined 4x4 transform, or ``None`` if coarse registration fails.

        Raises:
            ValueError: if the color and depth images disagree in size.
        """
        color_img0 = observations[0][0]
        depth_img0 = observations[0][1]
        camera_intrinsic = camera_intrinsics[0]
        frame_idx = self.node(self.node_now)['frame_idx']
        history_observations = self.get_frame(frame_idx)

        color_img1 = history_observations[0][0]
        depth_img1 = history_observations[0][1]

        # All four images must share one resolution; the matcher scales a
        # single intrinsic matrix for both frames.
        if (color_img0.shape[:2] != depth_img0.shape[:2] or
            color_img1.shape[:2] != depth_img1.shape[:2] or
            color_img0.shape[:2] != color_img1.shape[:2]):
            raise ValueError("Images and depth images should have the same size")

        start_time = time.time()
        T_coarse = self.matcher.registration_global((color_img0, depth_img0), (color_img1, depth_img1), camera_intrinsic)
        if T_coarse is None:
            return None

        pcd0 = observations[0][2]
        pcd1 = history_observations[0][2]

        # ICP refinement seeded with the coarse visual estimate.
        T_refine = registration_icp(pcd0, pcd1, threshold=0.1, tf_init=T_coarse, method='colored_icp', loss='Cauchy')
        self.T_in_node = T_refine
        end_time = time.time()
        logger.info(f"locate in node consumes time: {end_time - start_time:.3f}s")

        if debug:
            # Visually verify the registration result (before/after transform).
            draw_geometries([pcd0, pcd1])
            pcd0.transform(T_refine)
            draw_geometries([pcd0, pcd1])
            logger.info(f"registration pose: {T_refine}")
        return T_refine

    def locate_node(self, observations, this_pose):
        """Find the map node matching the observation, creating one if needed.

        An observation whose nearest embedding distance exceeds
        ``self.dist_thre`` starts a new node; otherwise the nearest existing
        node is returned. The graph is grown whenever the node changes.
        """
        logger.info(f"number of nodes: {self.number_of_nodes()}")
        embedding_vector = self.embedding(opencv_to_pillow(observations[0][0])).contiguous()
        if self.database is None:
            # First observation bootstraps the database and the graph.
            self.database = embedding_vector
            self.save_observation(observations)
            self.grow_graph(self.node_now, self.node_last, this_pose)
            # NOTE(review): unlike the new-node branch below, this branch does
            # not set node_now or increment node_num before returning — confirm
            # that grow_graph handles the bootstrap case, otherwise the next
            # new node may reuse this node's index.
            return self.node_num

        distances, indices = torch_knn(embedding_vector, self.database, k=min(self.database.shape[0], 3), metric='L2')
        logger.info(distances)
        logger.info(indices)

        self.node_last = self.node_now
        if distances[0, 0] > self.dist_thre:
            # Too far from every known node: allocate a new node and extend
            # the embedding database with this observation.
            self.node_now = self.node_num
            self.node_num += 1
            self.database = torch.cat((self.database, embedding_vector), dim=0)
            self.save_observation(observations)
        else:
            self.node_now = indices[0, 0].item()

        if self.node_now != self.node_last:
            self.grow_graph(self.node_now, self.node_last, this_pose)

        return self.node_now
