import matplotlib.pyplot as plt
import cv2
import kornia as K
import kornia.feature as KF
import numpy as np
import torch
from kornia_moons.feature import draw_LAF_matches
import gradio as gr

def load_torch_image(fname):
    # Load as RGB float32 in [0, 1]; shape CxHxW.
    img: torch.Tensor = K.io.load_image(fname, K.io.ImageLoadType.RGB32)
    img = img[None]  # add a batch dimension -> 1xCxHxW
    img = K.geometry.resize(img, (700, 700))
    return img
 
def inference(file1, file2):
    """Match two uploaded images with LoFTR and return a visualization of the inlier matches."""
    fname1 = file1.name
    fname2 = file2.name
    img1 = load_torch_image(fname1)
    img2 = load_torch_image(fname2)

    # LoFTR matcher with weights pretrained for outdoor scenes.
    matcher = KF.LoFTR(pretrained='outdoor')

    input_dict = {"image0": K.color.rgb_to_grayscale(img1),  # LoFTR works on grayscale images only
                  "image1": K.color.rgb_to_grayscale(img2)}

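    # Run the matcher without tracking gradients; this is pure inference.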
    with torch.no_grad():
        correspondences = matcher(input_dict)
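    # Matched keypoint coordinates (N x 2, in pixels) for each image.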
    mkpts0 = correspondences['keypoints0'].cpu().numpy()
    mkpts1 = correspondences['keypoints1'].cpu().numpy()
    # Filter the raw correspondences with MAGSAC++: estimate the fundamental matrix
    # and keep only the geometrically consistent (inlier) matches.
    Fm, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    inliers = inliers > 0
    fig, ax = plt.subplots()

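    # Wrap the bare keypoints as local affine frames (LAFs) so that
    # kornia_moons' draw_LAF_matches can plot them on both images.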
    draw_LAF_matches(
        KF.laf_from_center_scale_ori(torch.from_numpy(mkpts0).view(1,-1, 2),
                                    torch.ones(mkpts0.shape[0]).view(1,-1, 1, 1),
                                    torch.ones(mkpts0.shape[0]).view(1,-1, 1)),

        KF.laf_from_center_scale_ori(torch.from_numpy(mkpts1).view(1,-1, 2),
                                    torch.ones(mkpts1.shape[0]).view(1,-1, 1, 1),
                                    torch.ones(mkpts1.shape[0]).view(1,-1, 1)),
        torch.arange(mkpts0.shape[0]).view(-1,1).repeat(1,2),
        K.tensor_to_image(img1),
        K.tensor_to_image(img2),
        inliers,
        draw_dict={'inlier_color': (0.2, 1, 0.2),
                'tentative_color': None, 
                'feature_color': (0.2, 0.5, 1), 'vertical': False}, ax=ax)
    plt.axis('off')
    fig.savefig('example.jpg', dpi=110, bbox_inches='tight')
    return 'example.jpg'


title = "Kornia-LoFTR"
description = "Gradio demo for Kornia LoFTR: Detector-Free Local Feature Matching with Transformers. To use it, upload two images to match, or click the example pair to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://kornia.readthedocs.io/en/latest/' target='_blank'>Open Source Differentiable Computer Vision Library</a> | <a href='https://github.com/kornia/kornia' target='_blank'>Kornia Github Repo</a> | <a href='https://github.com/zju3dv/LoFTR' target='_blank'>LoFTR Github</a> | <a href='https://arxiv.org/abs/2104.00680' target='_blank'>LoFTR: Detector-Free Local Feature Matching with Transformers</a></p>"
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"

examples = [['kn_church-2.jpg','kn_church-8.jpg']]
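# Two file inputs, one image output; queueing is enabled for the hosted demo.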
gr.Interface(
    inference, 
    [gr.inputs.Image(type="file", label="Input1"),gr.inputs.Image(type="file", label="Input2")], 
    gr.outputs.Image(type="file", label="Output"),
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
    css=css
    ).launch(debug=True)