from LightGlue.lightglue.utils import match_pair
from LightGlue.lightglue import LightGlue, SuperPoint, DISK, SIFT, ALIKED #DoGHardNet
from LightGlue.lightglue.utils import load_image, rbd# https://github.com/cvg/LightGlue
import numpy as np
import cv2
import torch

# Supported pairings: SuperPoint+LightGlue, DISK+LightGlue, ALIKED+LightGlue, or SIFT+LightGlue
# load the extractor
# Feature extractor: ALIKED keypoint detector capped at 2048 keypoints,
# in eval (inference) mode and moved to the GPU.
extractor = ALIKED(max_num_keypoints=2048).eval().cuda()
# LightGlue matcher configured for ALIKED features, eval mode, on the GPU.
# NOTE(review): both lines require a CUDA device at import time — confirm
# that is intended for every deployment target.
matcher = LightGlue(features='aliked').eval().cuda()

def getkp(img):
    """Extract ALIKED features from an image file.

    Parameters
    ----------
    img : path/identifier accepted by ``load_image``.

    Returns
    -------
    (kp, feats) : keypoint coordinates as a numpy array, plus the full
        feature dict (left on the GPU) for later matching.
    """
    tensor = load_image(img).cuda()
    features = extractor.extract(tensor)
    keypoints = features['keypoints'].cpu().numpy()
    return keypoints, features

def sift_points(kp0, feats0, kp1, feats1):
    """Match two feature sets with LightGlue and return corresponding points.

    Parameters
    ----------
    kp0, kp1 : accepted for signature compatibility; not used here.
    feats0, feats1 : feature dicts produced by the extractor (batched).

    Returns
    -------
    (good00, good01) : two (K, 2) numpy arrays of matched coordinates in
        image 0 and image 1 respectively.
    """
    result = matcher({'image0': feats0, 'image1': feats1})
    # Strip the batch dimension from inputs and matcher output alike.
    feats0, feats1, result = (rbd(item) for item in (feats0, feats1, result))
    pairs = result['matches']
    # Gather matched coordinates from each image's keypoint list.
    pts0 = feats0['keypoints'][pairs[..., 0]].cpu().numpy().reshape(-1, 2)
    pts1 = feats1['keypoints'][pairs[..., 1]].cpu().numpy().reshape(-1, 2)
    return pts0, pts1
#
def light_in(box, good00, good01):
    """Keep only matched point pairs whose image-0 point lies inside a box.

    Parameters
    ----------
    box : array of shape (>=2, >=2)
        Corner coordinates of an axis-aligned bounding box; only the first
        two columns (x, y) are used.
    good00, good01 : (K, 2) arrays
        Corresponding point coordinates in image 0 and image 1.

    Returns
    -------
    (kp00_in, kp01_in) : the rows of ``good00``/``good01`` for which the
        image-0 point lies inside the box (boundaries inclusive).
    """
    # Derive the extents from ALL corners rather than assuming row 0 is the
    # min corner and row 3 the max corner — identical for the original
    # ordering, but robust to any other corner ordering.
    x_min, y_min = box[:, :2].min(axis=0)
    x_max, y_max = box[:, :2].max(axis=0)

    x_coords, y_coords = good00[:, 0], good00[:, 1]

    # Boolean mask of image-0 points inside the (inclusive) box bounds.
    inside = (
        (x_min <= x_coords) & (x_coords <= x_max) &
        (y_min <= y_coords) & (y_coords <= y_max)
    )
    return good00[inside], good01[inside]

def feature_inbox(box, kp, feats):
    """Return the keypoints and features unchanged.

    NOTE(review): ``box`` is currently ignored — this looks like a
    placeholder for box-based filtering that was never implemented
    (``light_in`` performs the actual in-box selection).
    """
    return kp, feats
