import cv2
import gradio as gr
import kornia as K
import kornia.feature as KF
import matplotlib.pyplot as plt
import numpy as np
import torch
from kornia_moons.viz import draw_LAF_matches


def load_torch_image(img):
    if isinstance(img, np.ndarray):
        # Gradio passes an HxWxC uint8 array; convert to a CxHxW float tensor in [0, 1]
        img_tensor = K.image_to_tensor(img).float() / 255.0
    else:
        # Otherwise treat the input as a file path and load it with kornia
        img_tensor = K.io.load_image(img, K.io.ImageLoadType.RGB32)
    img_tensor = img_tensor.unsqueeze(0)  # Add batch dimension: 1xCxHxW
    # Resize to a fixed 700x700 so both images enter the matcher at the same size
    img_tensor = K.geometry.resize(img_tensor, (700, 700))
    return img_tensor


# Build the matcher once at import time so the pretrained weights are not
# reloaded on every request
matcher = KF.LoFTR(pretrained="outdoor").eval()


def inference(img1, img2):
    img1_tensor = load_torch_image(img1)
    img2_tensor = load_torch_image(img2)

    input_dict = {
        "image0": K.color.rgb_to_grayscale(img1_tensor),  # LoFTR works on grayscale images only
        "image1": K.color.rgb_to_grayscale(img2_tensor),
    }

    with torch.no_grad():
        correspondences = matcher(input_dict)
    mkpts0 = correspondences["keypoints0"].cpu().numpy()
    mkpts1 = correspondences["keypoints1"].cpu().numpy()

    # Filter the tentative matches with MAGSAC++. findFundamentalMat estimates a
    # fundamental matrix, not a homography, so the matrix is named Fm rather than H.
    Fm, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    if inliers is None:
        # Too few correspondences for a robust estimate: mark everything as an outlier
        inliers = np.zeros((mkpts0.shape[0], 1), dtype=bool)
    else:
        inliers = inliers > 0

    fig, ax = plt.subplots()
    draw_LAF_matches(
        KF.laf_from_center_scale_ori(
            torch.from_numpy(mkpts0).view(1, -1, 2),
            torch.ones(mkpts0.shape[0]).view(1, -1, 1, 1),
            torch.ones(mkpts0.shape[0]).view(1, -1, 1),
        ),
        KF.laf_from_center_scale_ori(
            torch.from_numpy(mkpts1).view(1, -1, 2),
            torch.ones(mkpts1.shape[0]).view(1, -1, 1, 1),
            torch.ones(mkpts1.shape[0]).view(1, -1, 1),
        ),
        torch.arange(mkpts0.shape[0]).view(-1, 1).repeat(1, 2),
        K.tensor_to_image(img1_tensor.squeeze()),
        K.tensor_to_image(img2_tensor.squeeze()),
        inliers,
        draw_dict={
            "inlier_color": (0.2, 1, 0.2),
            "tentative_color": None,
            "feature_color": (0.2, 0.5, 1),
            "vertical": False,
        },
        ax=ax,
    )
    plt.axis("off")
    return fig
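# A minimal sketch of a CUDA variant (an assumption, not part of this demo):
# move the matcher and both grayscale tensors to the GPU before matching, e.g.
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   matcher = KF.LoFTR(pretrained="outdoor").to(device).eval()
#   correspondences = matcher({k: v.to(device) for k, v in input_dict.items()})
# The .cpu() calls on the keypoints above already bring results back for OpenCV.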

title = "Kornia LoFTR"
description = (
    "Gradio demo for Kornia LoFTR: Detector-Free Local Feature Matching with "
    "Transformers. To use it, simply upload two images, or click one of the "
    "examples to load them. Read more at the links below."
)
article = (
    "Open Source Differentiable Computer Vision Library | Kornia GitHub Repo | "
    "LoFTR GitHub | LoFTR: Detector-Free Local Feature Matching with Transformers"
)
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
examples = [["kn_church-2.jpg", "kn_church-8.jpg"]]

iface = gr.Interface(
    inference,
    [
        gr.Image(type="numpy", label="Input1"),
        gr.Image(type="numpy", label="Input2"),
    ],
    gr.Plot(label="Feature Matches"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    css=css,
)
iface.launch(debug=True)