Spaces:
Running
Running
xuelunshen
committed on
Commit
•
5078caa
1
Parent(s):
1bfbd08
update: ui
Browse files
- app.py +16 -19
- common/utils.py +7 -2
- hloc/matchers/gim.py +2 -2
app.py
CHANGED
@@ -17,14 +17,12 @@ from common.utils import (
|
|
17 |
)
|
18 |
|
19 |
DESCRIPTION = """
|
20 |
-
# Image Matching
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
🐛 Your feedback is valuable to me. Please do not hesitate to report any bugs [here](https://github.com/Vincentqyw/image-matching-webui/issues).
|
28 |
"""
|
29 |
|
30 |
|
@@ -97,12 +95,12 @@ def run(config):
|
|
97 |
with gr.Row():
|
98 |
matcher_list = gr.Dropdown(
|
99 |
choices=list(matcher_zoo.keys()),
|
100 |
-
value="
|
101 |
label="Matching Model",
|
102 |
interactive=True,
|
103 |
)
|
104 |
match_image_src = gr.Radio(
|
105 |
-
["upload", "webcam"
|
106 |
label="Image Source",
|
107 |
value="upload",
|
108 |
)
|
@@ -248,22 +246,21 @@ def run(config):
|
|
248 |
output_matches_ransac = gr.Image(
|
249 |
label="Ransac Matches", type="numpy"
|
250 |
)
|
|
|
|
|
|
|
251 |
with gr.Accordion(
|
252 |
"Open for More: Matches Statistics", open=False
|
253 |
):
|
254 |
matches_result_info = gr.JSON(label="Matches Statistics")
|
255 |
matcher_info = gr.JSON(label="Match info")
|
256 |
|
257 |
-
with gr.Accordion(
|
258 |
-
|
259 |
-
|
|
|
|
|
260 |
)
|
261 |
-
with gr.Accordion(
|
262 |
-
"Open for More: Geometry info", open=False
|
263 |
-
):
|
264 |
-
geometry_result = gr.JSON(
|
265 |
-
label="Reconstructed Geometry"
|
266 |
-
)
|
267 |
|
268 |
# callbacks
|
269 |
match_image_src.change(
|
|
|
17 |
)
|
18 |
|
19 |
DESCRIPTION = """
|
20 |
+
# GIM Image Matching
|
21 |
+
<p align="center">
|
22 |
+
<span style="color: gray; font-size: smaller;">
|
23 |
+
Thanks <a href="https://github.com/Vincentqyw/image-matching-webui" style="color: gray;">Vincentqyw/image-matching-webui</a> for the UI framework.
|
24 |
+
</span>
|
25 |
+
</p>
|
|
|
|
|
26 |
"""
|
27 |
|
28 |
|
|
|
95 |
with gr.Row():
|
96 |
matcher_list = gr.Dropdown(
|
97 |
choices=list(matcher_zoo.keys()),
|
98 |
+
value="gim",
|
99 |
label="Matching Model",
|
100 |
interactive=True,
|
101 |
)
|
102 |
match_image_src = gr.Radio(
|
103 |
+
["upload", "webcam"],
|
104 |
label="Image Source",
|
105 |
value="upload",
|
106 |
)
|
|
|
246 |
output_matches_ransac = gr.Image(
|
247 |
label="Ransac Matches", type="numpy"
|
248 |
)
|
249 |
+
output_wrapped = gr.Image(
|
250 |
+
label="Wrapped Pair", type="numpy"
|
251 |
+
)
|
252 |
with gr.Accordion(
|
253 |
"Open for More: Matches Statistics", open=False
|
254 |
):
|
255 |
matches_result_info = gr.JSON(label="Matches Statistics")
|
256 |
matcher_info = gr.JSON(label="Match info")
|
257 |
|
258 |
+
with gr.Accordion(
|
259 |
+
"Open for More: Geometry info", open=False
|
260 |
+
):
|
261 |
+
geometry_result = gr.JSON(
|
262 |
+
label="Reconstructed Geometry"
|
263 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
264 |
|
265 |
# callbacks
|
266 |
match_image_src.change(
|
common/utils.py
CHANGED
@@ -41,14 +41,15 @@ def get_feature_model(conf):
|
|
41 |
def gen_examples():
|
42 |
random.seed(1)
|
43 |
example_matchers = [
|
|
|
|
|
|
|
44 |
"disk+lightglue",
|
45 |
"loftr",
|
46 |
"disk",
|
47 |
"d2net",
|
48 |
-
"topicfm",
|
49 |
"superpoint+superglue",
|
50 |
"disk+dualsoftmax",
|
51 |
-
"lanet",
|
52 |
]
|
53 |
|
54 |
def gen_images_pairs(path: str, count: int = 5):
|
@@ -64,6 +65,10 @@ def gen_examples():
|
|
64 |
# image pair path
|
65 |
path = "datasets/sacre_coeur/mapping"
|
66 |
pairs = gen_images_pairs(path, len(example_matchers))
|
|
|
|
|
|
|
|
|
67 |
match_setting_threshold = DEFAULT_SETTING_THRESHOLD
|
68 |
match_setting_max_features = DEFAULT_SETTING_MAX_FEATURES
|
69 |
detect_keypoints_threshold = DEFAULT_DEFAULT_KEYPOINT_THRESHOLD
|
|
|
41 |
def gen_examples():
|
42 |
random.seed(1)
|
43 |
example_matchers = [
|
44 |
+
"gim",
|
45 |
+
"gim",
|
46 |
+
"gim",
|
47 |
"disk+lightglue",
|
48 |
"loftr",
|
49 |
"disk",
|
50 |
"d2net",
|
|
|
51 |
"superpoint+superglue",
|
52 |
"disk+dualsoftmax",
|
|
|
53 |
]
|
54 |
|
55 |
def gen_images_pairs(path: str, count: int = 5):
|
|
|
65 |
# image pair path
|
66 |
path = "datasets/sacre_coeur/mapping"
|
67 |
pairs = gen_images_pairs(path, len(example_matchers))
|
68 |
+
gim_pairs = [('datasets/gim/0a.png', 'datasets/gim/0b.png'),
|
69 |
+
('datasets/gim/1a.png', 'datasets/gim/1b.png'),
|
70 |
+
('datasets/gim/2a.png', 'datasets/gim/2b.png')]
|
71 |
+
pairs = gim_pairs + pairs
|
72 |
match_setting_threshold = DEFAULT_SETTING_THRESHOLD
|
73 |
match_setting_max_features = DEFAULT_SETTING_MAX_FEATURES
|
74 |
detect_keypoints_threshold = DEFAULT_DEFAULT_KEYPOINT_THRESHOLD
|
hloc/matchers/gim.py
CHANGED
@@ -38,12 +38,12 @@ class GIM(BaseModel):
|
|
38 |
cmd = ["wget", link, "-O", str(model_path)]
|
39 |
logger.info(f"Downloading the DKMv3 model with `{cmd}`.")
|
40 |
subprocess.run(cmd, check=True)
|
41 |
-
logger.info(f"Loading
|
42 |
# self.net = DKMv3(path_to_weights=str(model_path), device=device)
|
43 |
|
44 |
model = DKMv3(None, 672, 896, upsample_preds=True)
|
45 |
|
46 |
-
checkpoints_path =
|
47 |
state_dict = torch.load(checkpoints_path, map_location='cpu')
|
48 |
if 'state_dict' in state_dict.keys(): state_dict = state_dict['state_dict']
|
49 |
for k in list(state_dict.keys()):
|
|
|
38 |
cmd = ["wget", link, "-O", str(model_path)]
|
39 |
logger.info(f"Downloading the DKMv3 model with `{cmd}`.")
|
40 |
subprocess.run(cmd, check=True)
|
41 |
+
logger.info(f"Loading GIM model...")
|
42 |
# self.net = DKMv3(path_to_weights=str(model_path), device=device)
|
43 |
|
44 |
model = DKMv3(None, 672, 896, upsample_preds=True)
|
45 |
|
46 |
+
checkpoints_path = str(model_path)
|
47 |
state_dict = torch.load(checkpoints_path, map_location='cpu')
|
48 |
if 'state_dict' in state_dict.keys(): state_dict = state_dict['state_dict']
|
49 |
for k in list(state_dict.keys()):
|