import time

import clip
import cv2
import kornia as K
import kornia.feature as KF
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt1  # NOTE: legacy alias kept for compatibility; prefer `plt`
import numpy as np
import open3d as o3d
import PIL
import torch
import torch.nn.functional as F
from kornia_moons.viz import draw_LAF_matches

from semantic_nav.log_utils import get_logger
logger = get_logger()


def open3d_to_kornia(o3d_image):
    """
    Convert an Open3D tensor image into a Kornia-style CHW torch tensor.

    Args:
        o3d_image (open3d.t.geometry.Image): Input image in Open3D tensor format (HWC).

    Returns:
        torch.Tensor: The same image data as a CHW tensor.
    """
    # Bridge Open3D -> PyTorch through DLPack (no data copy), then
    # move the channel axis to the front: HWC -> CHW.
    hwc_tensor = torch.utils.dlpack.from_dlpack(o3d_image.as_tensor().to_dlpack())
    return hwc_tensor.permute(2, 0, 1)


def opencv_to_kornia(image):
    """
    Convert an image from OpenCV format to Kornia format.

    Args:
        image (np.ndarray): The input image in OpenCV format (BGR, HWC),
            presumably uint8 in [0, 255] — values are divided by 255.

    Returns:
        torch.Tensor: The image converted to Kornia format
            (RGB, 1xCxHxW, float32, normalized to [0, 1]).
    """
    # BGR -> RGB. The original converted to float32 here AND again below;
    # a single conversion avoids one redundant full-image copy.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Convert to float32 and normalize to [0, 1]
    image_float = image_rgb.astype(np.float32) / 255.0
    # HWC -> CHW, then add a leading batch dimension
    return K.image_to_tensor(image_float).unsqueeze(0)


def resize_image(image, width, height):
    """
    Resize an image to (width, height).

    numpy arrays go through OpenCV with nearest-exact interpolation;
    torch tensors go through Kornia bilinear resize (computed on CPU,
    result moved back to the tensor's original device).

    Raises:
        ValueError: If the input is neither np.ndarray nor torch.Tensor.
    """
    if isinstance(image, torch.Tensor):
        original_device = image.device
        out = K.geometry.resize(image.cpu(), size=(height, width), interpolation='bilinear', antialias=True)
        return out.to(original_device)
    if isinstance(image, np.ndarray):
        # NEAREST_EXACT preserves exact pixel values (e.g. label maps);
        # cv2.INTER_LANCZOS4 would give smoother results at more compute cost.
        return cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_NEAREST_EXACT)
    raise ValueError("Unsupported image type: {}".format(type(image)))


def feature_matching(matcher, img1, img2, conf_thre=0.7, max_num=500):
    """
    Run a LoFTR-style matcher on an image pair and return filtered keypoint pairs.

    Args:
        matcher: Callable returning a dict with "keypoints0", "keypoints1", "confidence".
        img1, img2 (torch.Tensor): RGB input images (converted to grayscale here).
        conf_thre (float): Minimum match confidence to keep.
        max_num (int): Upper bound on the number of matches returned.

    Returns:
        tuple[torch.Tensor, torch.Tensor]: Matched keypoints in img1 and img2.
    """
    # LoFTR works on grayscale images only
    input_dict = {
        "image0": K.color.rgb_to_grayscale(img1),
        "image1": K.color.rgb_to_grayscale(img2),
    }
    start = time.time()
    with torch.inference_mode():
        correspondences = matcher(input_dict)
    elapsed = time.time() - start
    logger.info(f"Feature matching inference time: {elapsed:.3f}s")

    confidence = correspondences["confidence"]
    keep = confidence > conf_thre
    kpts0 = correspondences["keypoints0"][keep]
    kpts1 = correspondences["keypoints1"][keep]

    # Cap the number of matches, preferring the most confident ones.
    if kpts0.shape[0] > max_num:
        _, top_idx = torch.topk(confidence[keep], k=max_num, dim=-1, largest=True, sorted=False)
        kpts0 = kpts0[top_idx, :]
        kpts1 = kpts1[top_idx, :]

    return kpts0, kpts1


def display_matches(img1, img2, mkpts0, mkpts1, inliers=None):
    """
    Visualize keypoint matches between two images with kornia_moons.

    Args:
        img1, img2 (torch.Tensor): The matched images (Kornia format).
        mkpts0, mkpts1 (torch.Tensor): Corresponding keypoints (N, 2).
        inliers: Optional inlier mask; defaults to all-inliers.
    """
    matplotlib.use('TkAgg')
    if inliers is None:
        inliers = np.ones((mkpts0.shape[0], 1))
    pts0 = mkpts0.cpu()
    pts1 = mkpts1.cpu()

    def _to_laf(pts):
        # Wrap bare keypoints as local affine frames with unit scale/orientation,
        # as required by draw_LAF_matches.
        count = pts.shape[0]
        return KF.laf_from_center_scale_ori(
            pts.view(1, -1, 2),
            torch.ones(count).view(1, -1, 1, 1),
            torch.ones(count).view(1, -1, 1),
        )

    draw_LAF_matches(
        _to_laf(pts0),
        _to_laf(pts1),
        torch.arange(pts0.shape[0]).view(-1, 1).repeat(1, 2),
        K.tensor_to_image(img1),
        K.tensor_to_image(img2),
        inlier_mask=inliers,
        draw_dict={"inlier_color": (0.2, 1, 0.2), "tentative_color": None, "feature_color": (0.2, 0.5, 1), "vertical": False},
    )
    plt.show()


def distance_L2(xq, xb):
    """
    Pairwise squared L2 distances between query and database points.

    Uses the expansion ||q - b||^2 = ||q||^2 + ||b||^2 - 2 q.b to avoid
    materializing the (N, M, D) difference tensor.

    Args:
        xq: Query points, shape (N, D).
        xb: Database points, shape (M, D).

    Returns:
        (N, M) matrix of squared distances.
    """
    sq_norm_q = (xq ** 2).sum(axis=1).reshape(-1, 1)
    sq_norm_b = (xb ** 2).sum(axis=1)
    return sq_norm_q + sq_norm_b - 2 * (xq @ xb.T)


def distance_IP(xq, xb):
    """
    Pairwise inner-product similarity between query and database points.

    Args:
        xq: Query points, shape (N, D).
        xb: Database points, shape (M, D).

    Returns:
        (N, M) matrix of inner products (larger means more similar).
    """
    return xq @ xb.T


def torch_knn(xq, xb, k, metric='L2'):
    """
    Find the k nearest neighbors of xq in xb using torch.

    Args:
        xq (torch.Tensor): The query points (N, D).
        xb (torch.Tensor): The database points (M, D).
        k (int): The number of neighbors to find.
        metric (str): 'L2' (squared Euclidean, smaller is closer) or
            'IP' (inner product, larger is closer).

    Returns:
        A torch.topk result: (values, indices) of the k nearest neighbors
        in xb for each point in xq, sorted best-first.

    Raises:
        KeyError: If metric is neither 'L2' nor 'IP'.
    """
    # For L2 the best neighbors have the SMALLEST scores; for IP the LARGEST.
    dispatch = {'L2': (distance_L2, False), 'IP': (distance_IP, True)}
    with torch.no_grad():
        score_fn, take_largest = dispatch[metric]
        scores = score_fn(xq, xb)
        return torch.topk(scores, k, dim=-1, largest=take_largest, sorted=True)


class ImageEmbedding:
    """CLIP (ViT-L/14@336px) image encoder producing embedding vectors."""

    def __init__(self, device=None):
        """
        Load the CLIP model onto the given (or auto-detected) device.

        Args:
            device: Torch device to run on. If None, picks CUDA/MPS/CPU at
                construction time. (The original evaluated the default once
                at import time, pinning all instances to whatever device was
                visible when the module was first loaded.)
        """
        if device is None:
            device = K.utils.get_cuda_or_mps_device_if_available()
        self.device = device
        self.model, self.preprocessor = clip.load("ViT-L/14@336px", device=self.device)
        self.model.eval()

    def preprocess(self, img_pil):
        """Apply CLIP preprocessing to a PIL image and add a batch dimension."""
        return self.preprocessor(img_pil).unsqueeze(0).to(self.device)

    def __call__(self, img):
        """
        Embed an image.

        Args:
            img: Either a PIL image (preprocessed here) or an already
                preprocessed torch.Tensor batch.

        Returns:
            torch.Tensor: CLIP image features (not L2-normalized).

        Raises:
            ValueError: If img is neither a PIL image nor a torch.Tensor
                (the original fell through to a NameError here).
        """
        if isinstance(img, PIL.Image.Image):
            image = self.preprocess(img)
        elif isinstance(img, torch.Tensor):
            image = img.to(self.device)
        else:
            raise ValueError("Unsupported image type: {}".format(type(img)))
        with torch.no_grad():
            image_features = self.model.encode_image(image)
        return image_features
