Update app.py
app.py CHANGED

@@ -3,83 +3,87 @@ import cv2 as cv
 import gradio as gr

 def match_features(img1, img2):
-
-
-
-        img2 = np.array(img2.convert("L"))

-
-

-
-        FLANN_INDEX_KDTREE = 1
-        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
-        search_params = dict(checks=50)
-        flann = cv.FlannBasedMatcher(index_params, search_params)
-        kp1, des1 = cv.SIFT_create().detectAndCompute(img1, None)
-        kp2, des2 = cv.SIFT_create().detectAndCompute(img2, None)
-
-        matches = flann.knnMatch(des1, des2, k=2)
-        matchesMask = [[0, 0] for i in range(len(matches))]
-        good_matches = []
-        for i, (m, n) in enumerate(matches):
-            if m.distance < 0.7 * n.distance:
-                matchesMask[i] = [1, 0]
-                good_matches.append(m)
-
-        sift_flann_matches_count = len(good_matches)
-
-        # Draw matches
-        draw_params = dict(matchColor=(0, 255, 0),
-                           singlePointColor=(255, 0, 0),
-                           matchesMask=matchesMask,
-                           flags=cv.DrawMatchesFlags_DEFAULT)
-        img3_sift_flann = cv.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
-
-        # Convert image to RGB format for display in Gradio
-        img3_sift_flann = cv.cvtColor(img3_sift_flann, cv.COLOR_BGR2RGB)
-
-        # Add text with match count
-        font = cv.FONT_HERSHEY_SIMPLEX
-        font_scale = 1
-        font_color = (255, 255, 255)  # White color
-        thickness = 2
-
-        # Add count text to the image
-        h, w = img3_sift_flann.shape[:2]
-        text = f"SIFT-FLANN Matches: {sift_flann_matches_count}"
-        # Get text size
-        (text_width, text_height), _ = cv.getTextSize(text, font, font_scale, thickness)
-        # Position text at bottom center
-        x = (w - text_width) // 2
-        y = h - 20  # 20 pixels from bottom
-        # Add black background for better visibility
-        cv.rectangle(img3_sift_flann, (x-5, y-text_height-5), (x+text_width+5, y+5), (0, 0, 0), -1)
-        # Add text
-        cv.putText(img3_sift_flann, text, (x, y), font, font_scale, font_color, thickness)
-
-        return img3_sift_flann
-
-    except Exception as e:
-        # Return the error message if something goes wrong
-        return None, f"Error occurred: {str(e)}"
-
-# Gradio interface with no example images
 iface = gr.Interface(
     fn=match_features,
     inputs=[
         gr.Image(type="pil", label="Image 1"),
         gr.Image(type="pil", label="Image 2")
     ],
-    outputs=
-
-        gr.Textbox(label="Error Log", placeholder="Error details will appear here", interactive=False)  # Error message box
-    ],
-    title="SIFT-FLANN Image Feature Matching",
     description="""
-    Upload two images of the same subject taken from different angles to find
-
-
 )

 iface.launch()
 import gradio as gr

 def match_features(img1, img2):
+    # Convert Gradio image inputs (PIL) to OpenCV format (numpy array)
+    img1 = np.array(img1.convert("L"))
+    img2 = np.array(img2.convert("L"))

+    # SIFT with FLANN
+    sift = cv.SIFT_create()
+    kp1, des1 = sift.detectAndCompute(img1, None)
+    kp2, des2 = sift.detectAndCompute(img2, None)
+
+    # Initialize FLANN
+    FLANN_INDEX_KDTREE = 1
+    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
+    search_params = dict(checks=50)
+    flann = cv.FlannBasedMatcher(index_params, search_params)
+
+    # Find matches
+    matches = flann.knnMatch(des1, des2, k=2)
+
+    # Apply ratio test
+    matchesMask = [[0, 0] for i in range(len(matches))]
+    good_matches = []
+    for i, (m, n) in enumerate(matches):
+        if m.distance < 0.7 * n.distance:
+            matchesMask[i] = [1, 0]
+            good_matches.append(m)
+
+    # Calculate match count
+    sift_flann_matches_count = len(good_matches)
+
+    # Draw matches
+    draw_params = dict(
+        matchColor=(0, 255, 0),
+        singlePointColor=(255, 0, 0),
+        matchesMask=matchesMask,
+        flags=cv.DrawMatchesFlags_DEFAULT
+    )
+    img3_sift_flann = cv.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
+
+    # Convert image to RGB format for display in Gradio
+    img3_sift_flann = cv.cvtColor(img3_sift_flann, cv.COLOR_BGR2RGB)
+
+    # Add text with match count to the image
+    font = cv.FONT_HERSHEY_SIMPLEX
+    font_scale = 1
+    font_color = (255, 255, 255)  # White color
+    thickness = 2
+
+    # Add count text
+    h, w = img3_sift_flann.shape[:2]
+    text = f"SIFT-FLANN Matches: {sift_flann_matches_count}"
+
+    # Get text size
+    (text_width, text_height), _ = cv.getTextSize(text, font, font_scale, thickness)
+
+    # Position text at bottom center
+    x = (w - text_width) // 2
+    y = h - 20  # 20 pixels from bottom
+
+    # Add black background for better visibility
+    cv.rectangle(img3_sift_flann, (x-5, y-text_height-5), (x+text_width+5, y+5), (0, 0, 0), -1)
+
+    # Add text
+    cv.putText(img3_sift_flann, text, (x, y), font, font_scale, font_color, thickness)
+
+    return img3_sift_flann

+# Gradio interface without examples
 iface = gr.Interface(
     fn=match_features,
     inputs=[
         gr.Image(type="pil", label="Image 1"),
         gr.Image(type="pil", label="Image 2")
     ],
+    outputs=gr.Image(label="SIFT (FLANN) Matches"),
+    title="Image Feature Matching with SIFT+FLANN",
     description="""
+    Upload two images of the same subject taken from different angles to find
+    feature matches using SIFT (Scale-Invariant Feature Transform) with
+    FLANN (Fast Library for Approximate Nearest Neighbors).
+    """
 )

+# Launch the interface
 iface.launch()
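
A minimal way to exercise the updated match_features outside the Gradio UI is sketched below. It is not part of the commit: it assumes app.py can be imported without blocking (iface.launch() runs at import time as written, so the launch call would need to be skipped or guarded locally), that numpy, opencv-python, gradio and Pillow are installed, and the image file names are placeholders.

# Offline sanity check for match_features (sketch, not part of the commit).
# Assumes importing app.py does not block on iface.launch(), e.g. the launch
# call has been guarded locally; view_a.jpg / view_b.jpg are placeholder files.
from PIL import Image
import cv2 as cv

from app import match_features

img_a = Image.open("view_a.jpg")   # two photos of the same scene
img_b = Image.open("view_b.jpg")   # taken from different angles

result = match_features(img_a, img_b)   # RGB array with matches and count drawn
print("output shape:", result.shape)

# cv.imwrite expects BGR, so convert back before saving a preview.
cv.imwrite("matches_preview.png", cv.cvtColor(result, cv.COLOR_RGB2BGR))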
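
The diff only touches app.py; the Space's dependency file is not shown. Based on the imports the code relies on (numpy, cv2, gradio, PIL), a requirements.txt for this app would plausibly look roughly like the following. The exact package names and the absence of version pins are assumptions, not taken from the repository.

# requirements.txt (assumed, not from the repository)
gradio
numpy
opencv-python-headless
Pillow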