NivedPadikkal committed on
Commit 594b2f7 · verified · 1 Parent(s): 66912be

Update app.py

Files changed (1)
  1. app.py +79 -91
app.py CHANGED
@@ -1,19 +1,15 @@
 import numpy as np
 import cv2 as cv
 import gradio as gr
-import base64
-from PIL import Image
-import io
 
-def match_features(target_img, comparison_gallery):
-    # Initialize list to store results and match counts
+def match_features(target_img, comp_img1, comp_img2, comp_img3, comp_img4, comp_img5, comp_img6, comp_img7, comp_img8):
+    # Initialize list to store results
     result_images = []
     match_counts = []
 
-    # Skip processing if no target image
-    if target_img is None:
-        return [], []
-
+    # List of comparison images
+    comparison_imgs = [comp_img1, comp_img2, comp_img3, comp_img4, comp_img5, comp_img6, comp_img7, comp_img8]
+
     # Convert target image to grayscale OpenCV format
     target_cv = np.array(target_img.convert("L"))
 
@@ -28,8 +24,14 @@ def match_features(target_img, comparison_gallery):
     flann = cv.FlannBasedMatcher(index_params, search_params)
 
     # Process each comparison image
-    for img in comparison_gallery:
+    for img in comparison_imgs:
+        # Create a default blank image with "No image" text
+        blank_img = np.zeros((400, 800, 3), dtype=np.uint8)
+        cv.putText(blank_img, "No image provided", (250, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+
         if img is None:
+            result_images.append(blank_img)
+            match_counts.append(0)
             continue
 
         # Convert to grayscale OpenCV format
@@ -40,6 +42,9 @@ def match_features(target_img, comparison_gallery):
 
         # Skip if no features detected
         if des_img is None or des_target is None:
+            cv.putText(blank_img, "No features detected", (250, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+            result_images.append(blank_img)
+            match_counts.append(0)
             continue
 
         try:
@@ -91,102 +96,85 @@ def match_features(target_img, comparison_gallery):
             cv.rectangle(result_img, (x-5, y-text_height-5), (x+text_width+5, y+5), (0,0,0), -1)
             cv.putText(result_img, text, (x, y), font, font_scale, font_color, thickness)
 
-            # Convert to PIL image for gallery
-            pil_img = Image.fromarray(result_img)
-            result_images.append((pil_img, f"Matches: {match_count}"))
-
+            result_images.append(result_img)
         except Exception as e:
-            print(f"Error processing image: {str(e)}")
+            # Handle any errors
+            error_img = np.zeros((400, 800, 3), dtype=np.uint8)
+            cv.putText(error_img, f"Error: {str(e)}", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1)
+            result_images.append(error_img)
+            match_counts.append(0)
+
+    # Ensure we have 8 results
+    while len(result_images) < 8:
+        blank_img = np.zeros((400, 800, 3), dtype=np.uint8)
+        cv.putText(blank_img, "No image provided", (250, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+        result_images.append(blank_img)
+        match_counts.append(0)
+
+    # Return all match results and counts
+    return result_images + [match_counts]
+
+# Create Gradio interface with 1 target and up to 8 comparison images
+with gr.Blocks(title="Image Feature Matching Comparison") as iface:
+    gr.Markdown("# Image Feature Matching with SIFT+FLANN")
+    gr.Markdown("""
+    Upload a target image and up to 8 comparison images to find feature matches using
+    SIFT (Scale-Invariant Feature Transform) with FLANN (Fast Library for Approximate Nearest Neighbors).
+    The number of matches will be displayed for each comparison.
+    """)
 
-    # Sort results by match counts (descending)
-    sorted_results = sorted(zip(result_images, match_counts), key=lambda x: x[1], reverse=True)
+    with gr.Row():
+        target_input = gr.Image(type="pil", label="Target Image")
 
-    # Unzip the sorted results
-    if sorted_results:
-        sorted_images, sorted_counts = zip(*sorted_results)
-    else:
-        sorted_images, sorted_counts = [], []
+    with gr.Row():
+        comp_img1 = gr.Image(type="pil", label="Comparison Image 1")
+        comp_img2 = gr.Image(type="pil", label="Comparison Image 2")
 
-    return sorted_images, list(sorted_counts)
-
-def format_match_counts(counts):
-    if not counts:
-        return "No matches found"
+    with gr.Row():
+        comp_img3 = gr.Image(type="pil", label="Comparison Image 3")
+        comp_img4 = gr.Image(type="pil", label="Comparison Image 4")
 
-    formatted = ["### Match Counts (sorted by highest match)"]
-    for i, count in enumerate(counts):
-        formatted.append(f"Image {i+1}: **{count}** matches")
+    with gr.Row():
+        comp_img5 = gr.Image(type="pil", label="Comparison Image 5")
+        comp_img6 = gr.Image(type="pil", label="Comparison Image 6")
 
-    return "\n".join(formatted)
-
-# Create Gradio interface with gallery components
-with gr.Blocks(title="Image Feature Matching with Gallery") as iface:
-    gr.Markdown("# Image Feature Matching with SIFT+FLANN")
-    gr.Markdown("""
-    Upload a target image and multiple comparison images to find feature matches using
-    SIFT (Scale-Invariant Feature Transform) with FLANN (Fast Library for Approximate Nearest Neighbors).
+    with gr.Row():
+        comp_img7 = gr.Image(type="pil", label="Comparison Image 7")
+        comp_img8 = gr.Image(type="pil", label="Comparison Image 8")
 
-    The results will be sorted by the number of matches (highest first), and each result will display the match count.
-    """)
+    compare_btn = gr.Button("Compare Images")
 
     with gr.Row():
-        with gr.Column(scale=1):
-            target_input = gr.Image(type="pil", label="Target Image")
-            gr.Markdown("### Upload a single target image")
-
-        with gr.Column(scale=2):
-            comparison_gallery = gr.Gallery(
-                label="Comparison Images",
-                object_fit="contain",
-                columns=3,
-                rows=2,
-                height="auto",
-                allow_preview=False,
-                show_download_button=False
-            )
-            upload_button = gr.UploadButton("Click to Upload Images", file_types=["image"], file_count="multiple")
-            gr.Markdown("### Upload as many comparison images as you want")
-
-    compare_btn = gr.Button("Compare Images", variant="primary")
+        result1 = gr.Image(label="Result 1")
+        result2 = gr.Image(label="Result 2")
 
     with gr.Row():
-        with gr.Column(scale=2):
-            results_gallery = gr.Gallery(
-                label="Results (Sorted by Match Count)",
-                object_fit="contain",
-                columns=2,
-                rows=2,
-                height="auto",
-                allow_preview=True,
-                show_download_button=True,
-                show_label=True
-            )
-        with gr.Column(scale=1):
-            match_counts_output = gr.Markdown(label="Match Counts")
-
-    # Handle uploading multiple images to the gallery
-    upload_button.upload(
-        lambda files: [file for file in files],
-        upload_button,
-        comparison_gallery
-    )
+        result3 = gr.Image(label="Result 3")
+        result4 = gr.Image(label="Result 4")
+
+    with gr.Row():
+        result5 = gr.Image(label="Result 5")
+        result6 = gr.Image(label="Result 6")
+
+    with gr.Row():
+        result7 = gr.Image(label="Result 7")
+        result8 = gr.Image(label="Result 8")
+
+    match_counts_output = gr.JSON(label="Match Counts")
 
-    # Handle comparison button click
     compare_btn.click(
        fn=match_features,
-        inputs=[target_input, comparison_gallery],
-        outputs=[results_gallery, match_counts_output]
+        inputs=[
+            target_input,
+            comp_img1, comp_img2, comp_img3, comp_img4,
+            comp_img5, comp_img6, comp_img7, comp_img8
+        ],
+        outputs=[
+            result1, result2, result3, result4,
+            result5, result6, result7, result8,
+            match_counts_output
+        ]
    )
-
-    gr.Markdown("""
-    ### Instructions
-
-    1. Upload a target image on the left side
-    2. Upload multiple comparison images using the upload button (you can select multiple files at once)
-    3. Click "Compare Images" to find feature matches
-    4. Results will be sorted by the number of matches (highest first)
-    5. Each result image shows the matches between target and comparison image
-    """)
 
 # Launch the interface
 iface.launch()
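
Note on the unchanged code: the hunks above leave out the shared region between them, where the SIFT descriptors (des_target, des_img), the FLANN index_params/search_params and the drawn result_img/match_count are produced. As a minimal sketch of how such a SIFT + FLANN matching step typically works (the helper name, KD-tree parameters and 0.7 ratio threshold below are illustrative assumptions, not the committed code):

import numpy as np
import cv2 as cv

def count_sift_flann_matches(target_gray: np.ndarray, candidate_gray: np.ndarray) -> int:
    # Hypothetical helper: detect SIFT keypoints and compute descriptors for both grayscale images
    sift = cv.SIFT_create()
    _, des_target = sift.detectAndCompute(target_gray, None)
    _, des_img = sift.detectAndCompute(candidate_gray, None)
    if des_target is None or des_img is None:
        return 0

    # FLANN with a KD-tree index is the usual pairing for SIFT's float descriptors (assumed parameter values)
    index_params = dict(algorithm=1, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    knn_matches = flann.knnMatch(des_target, des_img, k=2)

    # Lowe's ratio test keeps only distinctive correspondences (0.7 is an assumed threshold)
    good = [p[0] for p in knn_matches if len(p) == 2 and p[0].distance < 0.7 * p[1].distance]
    return len(good)

In the committed app.py, a count of this kind appears to be what the cv.rectangle/cv.putText context lines draw onto result_img and what gets collected into match_counts for the JSON output.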