import gradio as gr
import kornia as K
import kornia.feature as KF
import torch
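# Use the non-interactive Agg backend so figures can be rendered headlessly on the server.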
import matplotlib
matplotlib.use('Agg')
import numpy as np
from plot_utils import plot_images, plot_lines, plot_color_line_matches
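
# Pre-trained SOLD2 line segment detector/descriptor, plus a Kornia RANSAC homography
# estimator (the latter is only needed by the disabled warping option further below).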
sold2 = KF.SOLD2(pretrained=True, config=None)
ransac = K.geometry.RANSAC(model_type="homography", inl_th=3.0)

def infer(img1, img2, line_style: str):
    # Convert the uploaded RGB images to float tensors in [0, 1], convert to grayscale,
    # and stack them into a batch (both images must share the same spatial size).
    torch_img1 = K.image_to_tensor(img1).float() / 255.0
    torch_img2 = K.image_to_tensor(img2).float() / 255.0
    torch_img1_gray = K.color.rgb_to_grayscale(torch_img1)
    torch_img2_gray = K.color.rgb_to_grayscale(torch_img2)
    imgs = torch.stack([torch_img1_gray, torch_img2_gray])

    # Detect line segments and compute dense descriptors for both images.
    with torch.inference_mode():
        outputs = sold2(imgs)
    line_seg1 = outputs["line_segments"][0]
    line_seg2 = outputs["line_segments"][1]
    desc1 = outputs["dense_desc"][0]
    desc2 = outputs["dense_desc"][1]

    # Match segments between the two images; unmatched segments are marked with -1.
    with torch.inference_mode():
        matches = sold2.match(line_seg1, line_seg2, desc1[None], desc2[None])
    valid_matches = matches != -1
    match_indices = matches[valid_matches]
    matched_lines1 = line_seg1[valid_matches]
    matched_lines2 = line_seg2[match_indices]

    # Plot the two images side by side and overlay the requested visualization.
    imgs_to_plot = [K.tensor_to_image(torch_img1), K.tensor_to_image(torch_img2)]
    fig = plot_images(imgs_to_plot, ["Image 1 - detected lines", "Image 2 - detected lines"])
    if line_style == "Line Matches":
        lines_to_plot = [line_seg1.numpy(), line_seg2.numpy()]
        plot_lines(lines_to_plot, fig, ps=3, lw=2, indices={0, 1})
    elif line_style == "Color Line Matches":
        plot_color_line_matches([matched_lines1, matched_lines2], fig, lw=2)
    # elif line_style == "Line Segment Homography Warping":
    #     H_ransac, correspondence_mask = ransac(
    #         matched_lines1.flip(dims=(2,)), matched_lines2.flip(dims=(2,))
    #     )
    #     img1_warp_to2 = K.geometry.warp_perspective(
    #         torch_img1[None], H_ransac[None], (torch_img1.shape[1:])
    #     )
    #     fig = plot_images(
    #         [K.tensor_to_image(torch_img2), K.tensor_to_image(img1_warp_to2)],
    #         ["Image 2", "Image 1 warped to 2"],
    #     )
    return fig

description = """Line Segment Matching with Kornia
In this space you can try out line segment matching with the Kornia library, as shown in [this tutorial](https://kornia-tutorials.readthedocs.io/en/latest/line_detection_and_matching_sold2.html).
Just upload two images of the same scene taken from different viewpoints, choose an output option and run the demo.
"""

Iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.components.Image(),
        gr.components.Image(),
        gr.components.Dropdown(
            [
                "Line Matches",
                "Color Line Matches",
                # "Line Segment Homography Warping",
            ],
            value="Line Matches",
            label="Options",
        ),
    ],
    outputs=gr.components.Plot(),
    examples=[["terrace0.JPG", "terrace1.JPG"]],
    title="Line Segment Matching with Kornia",
    description=description,
).launch()
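
# Local sanity check (a sketch, not part of the Space): `infer` can also be called
# outside Gradio with RGB numpy arrays. The cv2 loading below is an assumption; any
# loader that yields HxWx3 RGB uint8 arrays of equal size works.
#
#   import cv2
#   img1 = cv2.cvtColor(cv2.imread("terrace0.JPG"), cv2.COLOR_BGR2RGB)
#   img2 = cv2.cvtColor(cv2.imread("terrace1.JPG"), cv2.COLOR_BGR2RGB)
#   fig = infer(img1, img2, "Color Line Matches")
#   fig.savefig("line_matches.png")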