Vincentqyw committed on
Commit e15a186
1 Parent(s): 8004049

update: limit keypoints number

app.py CHANGED
@@ -143,7 +143,7 @@ def run(config):
             #     label="Matcher mode",
             #     value="NN-mutual",
             # )
-            with gr.Accordion("RANSAC Setting", open=False):
+            with gr.Accordion("RANSAC Setting", open=True):
                 with gr.Row(equal_height=False):
                     enable_ransac = gr.Checkbox(label="Enable RANSAC")
                     ransac_method = gr.Dropdown(
@@ -174,7 +174,7 @@ def run(config):
                 value=10000,
             )

-            with gr.Accordion("Geometry Setting", open=True):
+            with gr.Accordion("Geometry Setting", open=False):
                 with gr.Row(equal_height=False):
                     # show_geom = gr.Checkbox(label="Show Geometry")
                     choice_estimate_geom = gr.Radio(
@@ -227,13 +227,13 @@ def run(config):
                         label="Keypoints Matching", type="numpy"
                     )
                     with gr.Accordion(
-                        "Open for More: Matches Statistics", open=False
+                        "Open for More: Matches Statistics", open=True
                     ):
                         matches_result_info = gr.JSON(label="Matches Statistics")
                         matcher_info = gr.JSON(label="Match info")

-                output_wrapped = gr.Image(label="Wrapped Pair", type="numpy")
                 with gr.Accordion("Open for More: Geometry info", open=False):
+                    output_wrapped = gr.Image(label="Wrapped Pair", type="numpy")
                     geometry_result = gr.JSON(label="Reconstructed Geometry")

     # callbacks
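For reference, a minimal standalone sketch of the `open=` flag these hunks flip (widget labels are illustrative): it only controls whether an accordion starts expanded on page load, not whether it can be toggled.

import gradio as gr

with gr.Blocks() as demo:
    # open=True: section is expanded when the page loads
    with gr.Accordion("RANSAC Setting", open=True):
        gr.Checkbox(label="Enable RANSAC")
    # open=False: section starts collapsed but can still be opened
    with gr.Accordion("Geometry Setting", open=False):
        gr.Radio(["Homography", "Fundamental"], label="Reconstruct Geometry")

# demo.launch()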
common/utils.py CHANGED
@@ -53,7 +53,7 @@ def gen_examples():
     match_setting_threshold = 0.1
     match_setting_max_features = 2000
     detect_keypoints_threshold = 0.01
-    enable_ransac = False
+    enable_ransac = True
     ransac_method = "RANSAC"
     ransac_reproj_threshold = 8
     ransac_confidence = 0.999
hloc/__init__.py CHANGED
@@ -4,7 +4,8 @@ from packaging import version
 __version__ = "1.3"

 formatter = logging.Formatter(
-    fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
+    fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s",
+    datefmt="%Y/%m/%d %H:%M:%S",
 )
 handler = logging.StreamHandler()
 handler.setFormatter(formatter)
hloc/extract_features.py CHANGED
@@ -85,17 +85,18 @@ confs = {
         "preprocessing": {
             "grayscale": False,
             "force_resize": True,
-            "resize_max": 1600,
+            "resize_max": 1024,
             "width": 640,
             "height": 480,
             "dfactor": 8,
         },
     },
     "d2net-ss": {
-        "output": "feats-d2net-ss",
+        "output": "feats-d2net-ss-n5000-r1600",
         "model": {
             "name": "d2net",
             "multiscale": False,
+            "max_keypoints": 5000,
         },
         "preprocessing": {
             "grayscale": False,
@@ -103,10 +104,11 @@ confs = {
         },
     },
     "d2net-ms": {
-        "output": "feats-d2net-ms",
+        "output": "feats-d2net-ms-n5000-r1600",
         "model": {
             "name": "d2net",
             "multiscale": True,
+            "max_keypoints": 5000,
         },
         "preprocessing": {
             "grayscale": False,
@@ -114,7 +116,7 @@ confs = {
         },
     },
     "rootsift": {
-        "output": "feats-sift",
+        "output": "feats-rootsift-n5000-r1600",
         "model": {
             "name": "dog",
             "max_keypoints": 5000,
@@ -129,7 +131,7 @@ confs = {
         },
     },
     "sift": {
-        "output": "feats-sift",
+        "output": "feats-sift-n5000-r1600",
         "model": {
             "name": "dog",
             "descriptor": "sift",
@@ -145,8 +147,12 @@ confs = {
         },
     },
     "sosnet": {
-        "output": "feats-sosnet",
-        "model": {"name": "dog", "descriptor": "sosnet"},
+        "output": "feats-sosnet-n5000-r1600",
+        "model": {
+            "name": "dog",
+            "descriptor": "sosnet",
+            "max_keypoints": 5000,
+        },
         "preprocessing": {
             "grayscale": True,
             "resize_max": 1600,
@@ -157,8 +163,12 @@ confs = {
         },
     },
     "hardnet": {
-        "output": "feats-hardnet",
-        "model": {"name": "dog", "descriptor": "hardnet"},
+        "output": "feats-hardnet-n5000-r1600",
+        "model": {
+            "name": "dog",
+            "descriptor": "hardnet",
+            "max_keypoints": 5000,
+        },
         "preprocessing": {
             "grayscale": True,
             "resize_max": 1600,
@@ -169,7 +179,7 @@ confs = {
         },
     },
     "disk": {
-        "output": "feats-disk",
+        "output": "feats-disk-n5000-r1600",
         "model": {
             "name": "disk",
             "max_keypoints": 5000,
@@ -180,7 +190,7 @@ confs = {
         },
     },
     "alike": {
-        "output": "feats-alike",
+        "output": "feats-alike-n5000-r1600",
         "model": {
             "name": "alike",
             "max_keypoints": 5000,
@@ -196,7 +206,7 @@ confs = {
         },
     },
     "lanet": {
-        "output": "feats-lanet",
+        "output": "feats-lanet-n5000-r1600",
         "model": {
             "name": "lanet",
             "keypoint_threshold": 0.1,
@@ -208,7 +218,7 @@ confs = {
         },
     },
     "darkfeat": {
-        "output": "feats-darkfeat-n5000-r1024",
+        "output": "feats-darkfeat-n5000-r1600",
         "model": {
             "name": "darkfeat",
             "max_keypoints": 5000,
@@ -225,7 +235,7 @@ confs = {
         },
     },
     "dedode": {
-        "output": "feats-dedode-n5000-r1024",
+        "output": "feats-dedode-n5000-r1600",
         "model": {
             "name": "dedode",
             "max_keypoints": 5000,
@@ -233,14 +243,14 @@ confs = {
         "preprocessing": {
             "grayscale": False,
             "force_resize": True,
-            "resize_max": 1024,
+            "resize_max": 1600,
             "width": 768,
             "height": 768,
             "dfactor": 8,
         },
     },
     "example": {
-        "output": "feats-example-n5000-r1024",
+        "output": "feats-example-n2000-r1024",
         "model": {
             "name": "example",
             "keypoint_threshold": 0.1,
@@ -323,13 +333,17 @@ class ImageDataset(torch.utils.data.Dataset):
         if isinstance(paths, (Path, str)):
             self.names = parse_image_lists(paths)
         elif isinstance(paths, collections.Iterable):
-            self.names = [p.as_posix() if isinstance(p, Path) else p for p in paths]
+            self.names = [
+                p.as_posix() if isinstance(p, Path) else p for p in paths
+            ]
         else:
             raise ValueError(f"Unknown format for path argument {paths}.")

         for name in self.names:
             if not (root / name).exists():
-                raise ValueError(f"Image {name} does not exists in root: {root}.")
+                raise ValueError(
+                    f"Image {name} does not exists in root: {root}."
+                )

     def __getitem__(self, idx):
         name = self.names[idx]
@@ -397,7 +411,10 @@ def extract(model, image_0, conf):

     # assure that the size is divisible by dfactor
     size_new = tuple(
-        map(lambda x: int(x // conf.dfactor * conf.dfactor), image.shape[-2:])
+        map(
+            lambda x: int(x // conf.dfactor * conf.dfactor),
+            image.shape[-2:],
+        )
     )
     image = F.resize(image, size=size_new, antialias=True)
     input_ = image.to(device, non_blocking=True)[None]
@@ -435,7 +452,8 @@ def main(
     overwrite: bool = False,
 ) -> Path:
     logger.info(
-        "Extracting local features with configuration:" f"\n{pprint.pformat(conf)}"
+        "Extracting local features with configuration:"
+        f"\n{pprint.pformat(conf)}"
     )

     dataset = ImageDataset(image_dir, conf["preprocessing"], image_list)
@@ -443,7 +461,9 @@ def main(
     feature_path = Path(export_dir, conf["output"] + ".h5")
     feature_path.parent.mkdir(exist_ok=True, parents=True)
     skip_names = set(
-        list_h5_names(feature_path) if feature_path.exists() and not overwrite else ()
+        list_h5_names(feature_path)
+        if feature_path.exists() and not overwrite
+        else ()
     )
     dataset.names = [n for n in dataset.names if n not in skip_names]
     if len(dataset.names) == 0:
@@ -507,7 +527,10 @@ if __name__ == "__main__":
     parser.add_argument("--image_dir", type=Path, required=True)
     parser.add_argument("--export_dir", type=Path, required=True)
     parser.add_argument(
-        "--conf", type=str, default="superpoint_aachen", choices=list(confs.keys())
+        "--conf",
+        type=str,
+        default="superpoint_aachen",
+        choices=list(confs.keys()),
     )
     parser.add_argument("--as_half", action="store_true")
     parser.add_argument("--image_list", type=Path)
hloc/extractors/d2net.py CHANGED
@@ -17,6 +17,7 @@ class D2Net(BaseModel):
         "checkpoint_dir": d2net_path / "models",
         "use_relu": True,
         "multiscale": False,
+        "max_keypoints": 1024,
     }
     required_inputs = ["image"]

@@ -50,6 +51,11 @@ class D2Net(BaseModel):
         )
         keypoints = keypoints[:, [1, 0]]  # (x, y) and remove the scale

+        idxs = scores.argsort()[-self.conf["max_keypoints"] or None :]
+        keypoints = keypoints[idxs, :2]
+        descriptors = descriptors[idxs]
+        scores = scores[idxs]
+
         return {
             "keypoints": torch.from_numpy(keypoints)[None],
             "scores": torch.from_numpy(scores)[None],
hloc/extractors/darkfeat.py CHANGED
@@ -32,7 +32,9 @@ class DarkFeat(BaseModel):
             model_path.parent.mkdir(exist_ok=True)
             cmd_wo_proxy = ["gdown", link, "-O", str(model_path)]
             cmd = ["gdown", link, "-O", str(model_path), "--proxy", self.proxy]
-            logger.info(f"Downloading the DarkFeat model with `{cmd_wo_proxy}`.")
+            logger.info(
+                f"Downloading the DarkFeat model with `{cmd_wo_proxy}`."
+            )
             try:
                 subprocess.run(cmd_wo_proxy, check=True)
             except subprocess.CalledProcessError as e:
@@ -50,6 +52,10 @@ class DarkFeat(BaseModel):
         keypoints = pred["keypoints"]
         descriptors = pred["descriptors"]
         scores = pred["scores"]
+        idxs = scores.argsort()[-self.conf["max_keypoints"] or None :]
+        keypoints = keypoints[idxs, :2]
+        descriptors = descriptors[:, idxs]
+        scores = scores[idxs]
         return {
             "keypoints": keypoints[None],  # 1 x N x 2
             "scores": scores[None],  # 1 x N
hloc/extractors/dedode.py CHANGED
@@ -36,7 +36,9 @@ class DeDoDe(BaseModel):

     # Initialize the line matcher
     def _init(self, conf):
-        model_detector_path = dedode_path / "pretrained" / conf["model_detector_name"]
+        model_detector_path = (
+            dedode_path / "pretrained" / conf["model_detector_name"]
+        )
         model_descriptor_path = (
             dedode_path / "pretrained" / conf["model_descriptor_name"]
         )
@@ -56,17 +58,24 @@ class DeDoDe(BaseModel):
             model_descriptor_path.parent.mkdir(exist_ok=True)
             link = self.weight_urls[conf["model_descriptor_name"]]
             cmd = ["wget", link, "-O", str(model_descriptor_path)]
-            logger.info(f"Downloading the DeDoDe descriptor model with `{cmd}`.")
+            logger.info(
+                f"Downloading the DeDoDe descriptor model with `{cmd}`."
+            )
             subprocess.run(cmd, check=True)

         logger.info(f"Loading DeDoDe model...")

         # load the model
         weights_detector = torch.load(model_detector_path, map_location="cpu")
-        weights_descriptor = torch.load(model_descriptor_path, map_location="cpu")
-        self.detector = dedode_detector_L(weights=weights_detector, device=device)
-        self.descriptor = dedode_descriptor_B(weights=weights_descriptor, device=device)
-
+        weights_descriptor = torch.load(
+            model_descriptor_path, map_location="cpu"
+        )
+        self.detector = dedode_detector_L(
+            weights=weights_detector, device=device
+        )
+        self.descriptor = dedode_descriptor_B(
+            weights=weights_descriptor, device=device
+        )
         logger.info(f"Load DeDoDe model done.")

     def _forward(self, data):
@@ -91,9 +100,9 @@ class DeDoDe(BaseModel):

         # step 2: describe keypoints
         # dim: 1 x N x 256
-        description_A = self.descriptor.describe_keypoints(batch_A, keypoints_A)[
-            "descriptions"
-        ]
+        description_A = self.descriptor.describe_keypoints(
+            batch_A, keypoints_A
+        )["descriptions"]
         keypoints_A = to_pixel_coords(keypoints_A, H_A, W_A)

         return {
hloc/extractors/dir.py CHANGED
@@ -8,7 +8,9 @@ import gdown

 from ..utils.base_model import BaseModel

-sys.path.append(str(Path(__file__).parent / "../../third_party/deep-image-retrieval"))
+sys.path.append(
+    str(Path(__file__).parent / "../../third_party/deep-image-retrieval")
+)
 os.environ["DB_ROOT"] = ""  # required by dirtorch

 from dirtorch.utils import common  # noqa: E402
@@ -40,7 +42,9 @@ class DIR(BaseModel):
     }

     def _init(self, conf):
-        checkpoint = Path(torch.hub.get_dir(), "dirtorch", conf["model_name"] + ".pt")
+        checkpoint = Path(
+            torch.hub.get_dir(), "dirtorch", conf["model_name"] + ".pt"
+        )
         if not checkpoint.exists():
             checkpoint.parent.mkdir(exist_ok=True, parents=True)
             link = self.dir_models[conf["model_name"]]
hloc/extractors/dog.py CHANGED
@@ -1,5 +1,8 @@
 import kornia
-from kornia.feature.laf import laf_from_center_scale_ori, extract_patches_from_pyramid
+from kornia.feature.laf import (
+    laf_from_center_scale_ori,
+    extract_patches_from_pyramid,
+)
 import numpy as np
 import torch
 import pycolmap
hloc/extractors/example.py CHANGED
@@ -26,7 +26,6 @@ class Example(BaseModel):
     required_inputs = ["image"]

     def _init(self, conf):
-
         # set checkpoints paths if needed
         model_path = example_path / "checkpoints" / f'{conf["model_name"]}'
         if not model_path.exists():
hloc/extractors/fire.py CHANGED
@@ -34,7 +34,6 @@ class FIRe(BaseModel):
     }

     def _init(self, conf):
-
         assert conf["model_name"] in self.fire_models.keys()
         # Config paths
         model_path = fire_path / "model" / conf["model_name"]
@@ -64,7 +63,6 @@ class FIRe(BaseModel):
         self.scales = conf["scales"]

     def _forward(self, data):
-
         image = self.norm_rgb(data["image"])

         # Feature extraction.
hloc/extractors/fire_local.py CHANGED
@@ -41,7 +41,6 @@ class FIRe(BaseModel):
     }

     def _init(self, conf):
-
         assert conf["model_name"] in self.fire_models.keys()

         # Config paths
@@ -75,7 +74,6 @@ class FIRe(BaseModel):
         self.features_num = conf["features_num"]

     def _forward(self, data):
-
         image = self.norm_rgb(data["image"])

         local_desc = self.net.forward_local(
hloc/extractors/lanet.py CHANGED
@@ -21,7 +21,9 @@ class LANet(BaseModel):
     required_inputs = ["image"]

     def _init(self, conf):
-        model_path = lanet_path / "checkpoints" / f'PointModel_{conf["model_name"]}.pth'
+        model_path = (
+            lanet_path / "checkpoints" / f'PointModel_{conf["model_name"]}.pth'
+        )
         if not model_path.exists():
             print(f"No model found at {model_path}")
         self.net = PointModel(is_test=True)
@@ -34,16 +36,16 @@ class LANet(BaseModel):
         _, _, Hc, Wc = descriptors.shape

         # Scores & Descriptors
-        kpts_score = (
-            torch.cat([keypoints, scores], dim=1).view(3, -1).t()
-        )
-        descriptors = (
-            descriptors.view(256, Hc, Wc).view(256, -1).t()
-        )
+        kpts_score = torch.cat([keypoints, scores], dim=1).view(3, -1).t()
+        descriptors = descriptors.view(256, Hc, Wc).view(256, -1).t()

         # Filter based on confidence threshold
-        descriptors = descriptors[kpts_score[:, 0] > self.conf["keypoint_threshold"], :]
-        kpts_score = kpts_score[kpts_score[:, 0] > self.conf["keypoint_threshold"], :]
+        descriptors = descriptors[
+            kpts_score[:, 0] > self.conf["keypoint_threshold"], :
+        ]
+        kpts_score = kpts_score[
+            kpts_score[:, 0] > self.conf["keypoint_threshold"], :
+        ]
         keypoints = kpts_score[:, 1:]
         scores = kpts_score[:, 0]
hloc/extractors/netvlad.py CHANGED
@@ -18,7 +18,9 @@ EPS = 1e-6
 class NetVLADLayer(nn.Module):
     def __init__(self, input_dim=512, K=64, score_bias=False, intranorm=True):
         super().__init__()
-        self.score_proj = nn.Conv1d(input_dim, K, kernel_size=1, bias=score_bias)
+        self.score_proj = nn.Conv1d(
+            input_dim, K, kernel_size=1, bias=score_bias
+        )
         centers = nn.parameter.Parameter(torch.empty([input_dim, K]))
         nn.init.xavier_uniform_(centers)
         self.register_parameter("centers", centers)
@@ -54,7 +56,9 @@ class NetVLAD(BaseModel):
         assert conf["model_name"] in self.dir_models.keys()

         # Download the checkpoint.
-        checkpoint = Path(torch.hub.get_dir(), "netvlad", conf["model_name"] + ".mat")
+        checkpoint = Path(
+            torch.hub.get_dir(), "netvlad", conf["model_name"] + ".mat"
+        )
         if not checkpoint.exists():
             checkpoint.parent.mkdir(exist_ok=True, parents=True)
             link = self.dir_models[conf["model_name"]]
@@ -77,7 +81,9 @@ class NetVLAD(BaseModel):
         mat = loadmat(checkpoint, struct_as_record=False, squeeze_me=True)

         # CNN weights.
-        for layer, mat_layer in zip(self.backbone.children(), mat["net"].layers):
+        for layer, mat_layer in zip(
+            self.backbone.children(), mat["net"].layers
+        ):
             if isinstance(layer, nn.Conv2d):
                 w = mat_layer.weights[0]  # Shape: S x S x IN x OUT
                 b = mat_layer.weights[1]  # Shape: OUT
hloc/extractors/rekd.py CHANGED
@@ -20,7 +20,9 @@ class REKD(BaseModel):
     required_inputs = ["image"]

     def _init(self, conf):
-        model_path = rekd_path / "checkpoints" / f'PointModel_{conf["model_name"]}.pth'
+        model_path = (
+            rekd_path / "checkpoints" / f'PointModel_{conf["model_name"]}.pth'
+        )
         if not model_path.exists():
             print(f"No model found at {model_path}")
         self.net = REKD_(is_test=True)
@@ -34,15 +36,29 @@ class REKD(BaseModel):

         # Scores & Descriptors
         kpts_score = (
-            torch.cat([keypoints, scores], dim=1).view(3, -1).t().cpu().detach().numpy()
+            torch.cat([keypoints, scores], dim=1)
+            .view(3, -1)
+            .t()
+            .cpu()
+            .detach()
+            .numpy()
         )
         descriptors = (
-            descriptors.view(256, Hc, Wc).view(256, -1).t().cpu().detach().numpy()
+            descriptors.view(256, Hc, Wc)
+            .view(256, -1)
+            .t()
+            .cpu()
+            .detach()
+            .numpy()
         )

         # Filter based on confidence threshold
-        descriptors = descriptors[kpts_score[:, 0] > self.conf["keypoint_threshold"], :]
-        kpts_score = kpts_score[kpts_score[:, 0] > self.conf["keypoint_threshold"], :]
+        descriptors = descriptors[
+            kpts_score[:, 0] > self.conf["keypoint_threshold"], :
+        ]
+        kpts_score = kpts_score[
+            kpts_score[:, 0] > self.conf["keypoint_threshold"], :
+        ]
         keypoints = kpts_score[:, 1:]
         scores = kpts_score[:, 0]
hloc/extractors/superpoint.py CHANGED
@@ -16,7 +16,10 @@ def sample_descriptors_fix_sampling(keypoints, descriptors, s: int = 8):
     keypoints = (keypoints + 0.5) / (keypoints.new_tensor([w, h]) * s)
     keypoints = keypoints * 2 - 1  # normalize to (-1, 1)
     descriptors = torch.nn.functional.grid_sample(
-        descriptors, keypoints.view(b, 1, -1, 2), mode="bilinear", align_corners=False
+        descriptors,
+        keypoints.view(b, 1, -1, 2),
+        mode="bilinear",
+        align_corners=False,
     )
     descriptors = torch.nn.functional.normalize(
         descriptors.reshape(b, c, -1), p=2, dim=1
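The reformatted call is the standard trick for reading descriptors off a coarse map at subpixel keypoint locations: map pixel coordinates into the [-1, 1] grid that `grid_sample` expects, sample bilinearly, then L2-normalize. A self-contained sketch with made-up sizes:

import torch

b, c, h, w, s = 1, 256, 60, 80, 8            # coarse map at stride s
descriptors = torch.randn(b, c, h, w)
keypoints = torch.rand(b, 100, 2) * torch.tensor([w * s, h * s])  # pixels

grid = ((keypoints + 0.5) / (keypoints.new_tensor([w, h]) * s)) * 2 - 1
desc = torch.nn.functional.grid_sample(
    descriptors, grid.view(b, 1, -1, 2), mode="bilinear", align_corners=False
)
desc = torch.nn.functional.normalize(desc.reshape(b, c, -1), p=2, dim=1)
print(desc.shape)  # torch.Size([1, 256, 100])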
hloc/match_dense.py CHANGED
@@ -224,7 +224,10 @@ def match(model, path_0, path_1, conf):
     image = torch.from_numpy(image / 255.0).float()
     # assure that the size is divisible by dfactor
     size_new = tuple(
-        map(lambda x: int(x // conf.dfactor * conf.dfactor), image.shape[-2:])
+        map(
+            lambda x: int(x // conf.dfactor * conf.dfactor),
+            image.shape[-2:],
+        )
     )
     image = F.resize(image, size=size_new, antialias=True)
     scale = np.array(size) / np.array(size_new)[::-1]
@@ -291,7 +294,10 @@ def match_images(model, image_0, image_1, conf, device="cpu"):

     # assure that the size is divisible by dfactor
     size_new = tuple(
-        map(lambda x: int(x // conf.dfactor * conf.dfactor), image.shape[-2:])
+        map(
+            lambda x: int(x // conf.dfactor * conf.dfactor),
+            image.shape[-2:],
+        )
     )
     image = F.resize(image, size=size_new)
     scale = np.array(size) / np.array(size_new)[::-1]
@@ -348,7 +354,7 @@ def match_images(model, image_0, image_1, conf, device="cpu"):
     if "mconf" in pred.keys():
         ret["mconf"] = pred["mconf"].cpu().numpy()
     else:
-        ret["mconf"] = np.ones_like(kpts0.cpu().numpy()[:,0])
+        ret["mconf"] = np.ones_like(kpts0.cpu().numpy()[:, 0])
     if "lines0" in pred.keys() and "lines1" in pred.keys():
         if "keypoints0" in pred.keys() and "keypoints1" in pred.keys():
             kpts0, kpts1 = pred["keypoints0"], pred["keypoints1"]
@@ -357,7 +363,10 @@ def match_images(model, image_0, image_1, conf, device="cpu"):
             kpts0_origin = kpts0_origin.cpu().numpy()
             kpts1_origin = kpts1_origin.cpu().numpy()
         else:
-            kpts0_origin, kpts1_origin = None, None  # np.zeros([0]), np.zeros([0])
+            kpts0_origin, kpts1_origin = (
+                None,
+                None,
+            )  # np.zeros([0]), np.zeros([0])
         lines0, lines1 = pred["lines0"], pred["lines1"]
         lines0_raw, lines1_raw = pred["raw_lines0"], pred["raw_lines1"]
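The reflowed `size_new` expression (it also appears in extract_features.py) floors each spatial dimension to a multiple of `dfactor` so the matcher's downsampling stages divide the image evenly; a one-liner sketch:

def round_to_dfactor(shape, dfactor=8):
    # floor each dimension to the nearest multiple of dfactor
    return tuple(int(x // dfactor * dfactor) for x in shape)

print(round_to_dfactor((481, 641)))  # (480, 640)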
hloc/match_features.py CHANGED
@@ -151,7 +151,8 @@ class WorkQueue:
     def __init__(self, work_fn, num_threads=1):
         self.queue = Queue(num_threads)
         self.threads = [
-            Thread(target=self.thread_fn, args=(work_fn,)) for _ in range(num_threads)
+            Thread(target=self.thread_fn, args=(work_fn,))
+            for _ in range(num_threads)
         ]
         for thread in self.threads:
             thread.start()
@@ -220,21 +221,24 @@ def main(
     features_ref: Optional[Path] = None,
     overwrite: bool = False,
 ) -> Path:
-
     if isinstance(features, Path) or Path(features).exists():
         features_q = features
         if matches is None:
             raise ValueError(
-                "Either provide both features and matches as Path" " or both as names."
+                "Either provide both features and matches as Path"
+                " or both as names."
             )
     else:
         if export_dir is None:
             raise ValueError(
-                "Provide an export_dir if features is not" f" a file path: {features}."
+                "Provide an export_dir if features is not"
+                f" a file path: {features}."
             )
         features_q = Path(export_dir, features + ".h5")
         if matches is None:
-            matches = Path(export_dir, f'{features}_{conf["output"]}_{pairs.stem}.h5')
+            matches = Path(
+                export_dir, f'{features}_{conf["output"]}_{pairs.stem}.h5'
+            )

     if features_ref is None:
         features_ref = features_q
@@ -276,7 +280,8 @@ def match_from_paths(
     overwrite: bool = False,
 ) -> Path:
     logger.info(
-        "Matching local features with configuration:" f"\n{pprint.pformat(conf)}"
+        "Matching local features with configuration:"
+        f"\n{pprint.pformat(conf)}"
     )

     if not feature_path_q.exists():
@@ -330,12 +335,11 @@ def match_images(model, feat0, feat1):
         desc0 = desc0.unsqueeze(0)
     if len(desc1.shape) == 2:
         desc1 = desc1.unsqueeze(0)
-
     if isinstance(feat0["keypoints"], list):
         feat0["keypoints"] = feat0["keypoints"][0][None]
     if isinstance(feat1["keypoints"], list):
         feat1["keypoints"] = feat1["keypoints"][0][None]
-
+
     pred = model(
         {
             "image0": feat0["image"],
@@ -386,7 +390,9 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--pairs", type=Path, required=True)
     parser.add_argument("--export_dir", type=Path)
-    parser.add_argument("--features", type=str, default="feats-superpoint-n4096-r1024")
+    parser.add_argument(
+        "--features", type=str, default="feats-superpoint-n4096-r1024"
+    )
     parser.add_argument("--matches", type=Path)
     parser.add_argument(
         "--conf", type=str, default="superglue", choices=list(confs.keys())
hloc/matchers/aspanformer.py CHANGED
@@ -21,6 +21,7 @@ class ASpanFormer(BaseModel):
     default_conf = {
         "weights": "outdoor",
        "match_threshold": 0.2,
+        "sinkhorn_iterations": 20,
         "config_path": aspanformer_path / "configs/aspan/outdoor/aspan_test.py",
         "model_name": "weights_aspanformer.tar",
     }
@@ -31,24 +32,39 @@ class ASpanFormer(BaseModel):
     }

     def _init(self, conf):
-        model_path = aspanformer_path / "weights" / Path(conf["weights"] + ".ckpt")
+        model_path = (
+            aspanformer_path / "weights" / Path(conf["weights"] + ".ckpt")
+        )
         # Download the model.
         if not model_path.exists():
             # model_path.parent.mkdir(exist_ok=True)
             tar_path = aspanformer_path / conf["model_name"]
             if not tar_path.exists():
                 link = self.aspanformer_models[conf["model_name"]]
-                cmd = ["gdown", link, "-O", str(tar_path), "--proxy", self.proxy]
+                cmd = [
+                    "gdown",
+                    link,
+                    "-O",
+                    str(tar_path),
+                    "--proxy",
+                    self.proxy,
+                ]
                 cmd_wo_proxy = ["gdown", link, "-O", str(tar_path)]
-                logger.info(f"Downloading the Aspanformer model with `{cmd_wo_proxy}`.")
+                logger.info(
+                    f"Downloading the Aspanformer model with `{cmd_wo_proxy}`."
+                )
                 try:
                     subprocess.run(cmd_wo_proxy, check=True)
                 except subprocess.CalledProcessError as e:
-                    logger.info(f"Downloading the Aspanformer model with `{cmd}`.")
+                    logger.info(
+                        f"Downloading the Aspanformer model with `{cmd}`."
+                    )
                     try:
                         subprocess.run(cmd, check=True)
                     except subprocess.CalledProcessError as e:
-                        logger.error(f"Failed to download the Aspanformer model.")
+                        logger.error(
+                            f"Failed to download the Aspanformer model."
+                        )
                         raise e

             do_system(f"cd {str(aspanformer_path)} & tar -xvf {str(tar_path)}")
@@ -58,9 +74,16 @@ class ASpanFormer(BaseModel):
         config = get_cfg_defaults()
         config.merge_from_file(conf["config_path"])
         _config = lower_config(config)
+
+        # update: match threshold
+        _config["aspan"]["match_coarse"]["thr"] = conf["match_threshold"]
+        _config["aspan"]["match_coarse"]["skh_iters"] = conf["sinkhorn_iterations"]
+
         self.net = _ASpanFormer(config=_config["aspan"])
         weight_path = model_path
-        state_dict = torch.load(str(weight_path), map_location="cpu")["state_dict"]
+        state_dict = torch.load(str(weight_path), map_location="cpu")[
+            "state_dict"
+        ]
         self.net.load_state_dict(state_dict, strict=False)

     def _forward(self, data):
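The new lines plumb the UI-level settings into the nested yacs-derived config before the network is built; a plain-dict sketch of the same override (key names taken from the hunk above, default values illustrative):

conf = {"match_threshold": 0.2, "sinkhorn_iterations": 20}
_config = {"aspan": {"match_coarse": {"thr": 0.1, "skh_iters": 4}}}

# overwrite the file defaults with the user-facing settings
_config["aspan"]["match_coarse"]["thr"] = conf["match_threshold"]
_config["aspan"]["match_coarse"]["skh_iters"] = conf["sinkhorn_iterations"]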
hloc/matchers/dkm.py CHANGED
@@ -55,7 +55,9 @@ class DKMv3(BaseModel):

         warp, certainty = self.net.match(img0, img1, device=device)
         matches, certainty = self.net.sample(warp, certainty)
-        kpts1, kpts2 = self.net.to_pixel_coordinates(matches, H_A, W_A, H_B, W_B)
+        kpts1, kpts2 = self.net.to_pixel_coordinates(
+            matches, H_A, W_A, H_B, W_B
+        )
         pred = {}
         pred["keypoints0"], pred["keypoints1"] = kpts1, kpts2
         return pred
hloc/matchers/dual_softmax.py CHANGED
@@ -3,6 +3,7 @@ import torch
 from ..utils.base_model import BaseModel
 import numpy as np

+
 # borrow from dedode
 def dual_softmax_matcher(
     desc_A: tuple["B", "C", "N"],
@@ -17,7 +18,9 @@ def dual_softmax_matcher(
     if normalize:
         desc_A = desc_A / desc_A.norm(dim=1, keepdim=True)
         desc_B = desc_B / desc_B.norm(dim=1, keepdim=True)
-    sim = torch.einsum("b c n, b c m -> b n m", desc_A, desc_B) * inv_temperature
+    sim = (
+        torch.einsum("b c n, b c m -> b n m", desc_A, desc_B) * inv_temperature
+    )
     P = sim.softmax(dim=-2) * sim.softmax(dim=-1)
     mask = torch.nonzero(
         (P == P.max(dim=-1, keepdim=True).values)
@@ -47,9 +50,14 @@ class DualSoftMax(BaseModel):
         pass

     def _forward(self, data):
-        if data["descriptors0"].size(-1) == 0 or data["descriptors1"].size(-1) == 0:
+        if (
+            data["descriptors0"].size(-1) == 0
+            or data["descriptors1"].size(-1) == 0
+        ):
             matches0 = torch.full(
-                data["descriptors0"].shape[:2], -1, device=data["descriptors0"].device
+                data["descriptors0"].shape[:2],
+                -1,
+                device=data["descriptors0"].device,
             )
             return {
                 "matches0": matches0,
hloc/matchers/gluestick.py CHANGED
@@ -33,9 +33,12 @@ class GlueStick(BaseModel):
     gluestick_models = {
         "checkpoint_GlueStick_MD.tar": "https://github.com/cvg/GlueStick/releases/download/v0.1_arxiv/checkpoint_GlueStick_MD.tar",
     }
+
     # Initialize the line matcher
     def _init(self, conf):
-        model_path = gluestick_path / "resources" / "weights" / conf["model_name"]
+        model_path = (
+            gluestick_path / "resources" / "weights" / conf["model_name"]
+        )

         # Download the model.
         if not model_path.exists():
hloc/matchers/nearest_neighbor.py CHANGED
@@ -36,24 +36,36 @@ class NearestNeighbor(BaseModel):
         pass

     def _forward(self, data):
-        if data["descriptors0"].size(-1) == 0 or data["descriptors1"].size(-1) == 0:
+        if (
+            data["descriptors0"].size(-1) == 0
+            or data["descriptors1"].size(-1) == 0
+        ):
             matches0 = torch.full(
-                data["descriptors0"].shape[:2], -1, device=data["descriptors0"].device
+                data["descriptors0"].shape[:2],
+                -1,
+                device=data["descriptors0"].device,
             )
             return {
                 "matches0": matches0,
                 "matching_scores0": torch.zeros_like(matches0),
             }
         ratio_threshold = self.conf["ratio_threshold"]
-        if data["descriptors0"].size(-1) == 1 or data["descriptors1"].size(-1) == 1:
+        if (
+            data["descriptors0"].size(-1) == 1
+            or data["descriptors1"].size(-1) == 1
+        ):
             ratio_threshold = None
-        sim = torch.einsum("bdn,bdm->bnm", data["descriptors0"], data["descriptors1"])
+        sim = torch.einsum(
+            "bdn,bdm->bnm", data["descriptors0"], data["descriptors1"]
+        )
         matches0, scores0 = find_nn(
             sim, ratio_threshold, self.conf["distance_threshold"]
         )
         if self.conf["do_mutual_check"]:
             matches1, scores1 = find_nn(
-                sim.transpose(1, 2), ratio_threshold, self.conf["distance_threshold"]
+                sim.transpose(1, 2),
+                ratio_threshold,
+                self.conf["distance_threshold"],
             )
             matches0 = mutual_check(matches0, matches1)
         return {
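`find_nn` and `mutual_check` are hloc helpers not shown in this diff; for illustration, a minimal reimplementation of the mutual (cross) check: a match i -> j is kept only if the reverse direction also maps j -> i (-1 means unmatched).

import torch

def mutual_check_sketch(matches0, matches1):
    # matches0: best match in B for each point in A; matches1: the reverse
    n = matches0.shape[-1]
    inds0 = torch.arange(n, device=matches0.device)
    safe = torch.where(matches0 > -1, matches0, torch.zeros_like(matches0))
    loop = torch.gather(matches1, -1, safe)  # follow the match back to A
    ok = (matches0 > -1) & (loop == inds0)
    return torch.where(ok, matches0, torch.full_like(matches0, -1))

m0 = torch.tensor([[1, 0, 2]])   # A -> B
m1 = torch.tensor([[1, 0, -1]])  # B -> A
print(mutual_check_sketch(m0, m1))  # tensor([[ 1,  0, -1]])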
hloc/matchers/roma.py CHANGED
@@ -84,7 +84,9 @@ class Roma(BaseModel):
         matches, certainty = self.net.sample(
             warp, certainty, num=self.conf["max_keypoints"]
         )
-        kpts1, kpts2 = self.net.to_pixel_coordinates(matches, H_A, W_A, H_B, W_B)
+        kpts1, kpts2 = self.net.to_pixel_coordinates(
+            matches, H_A, W_A, H_B, W_B
+        )
         pred = {}
         pred["keypoints0"], pred["keypoints1"] = kpts1, kpts2
         pred["mconf"] = certainty
hloc/matchers/sgmnet.py CHANGED
@@ -52,9 +52,18 @@ class SGMNet(BaseModel):
         # Download the model.
         if not sgmnet_weights.exists():
             if not tar_path.exists():
-                cmd = ["gdown", link, "-O", str(tar_path), "--proxy", self.proxy]
+                cmd = [
+                    "gdown",
+                    link,
+                    "-O",
+                    str(tar_path),
+                    "--proxy",
+                    self.proxy,
+                ]
                 cmd_wo_proxy = ["gdown", link, "-O", str(tar_path)]
-                logger.info(f"Downloading the SGMNet model with `{cmd_wo_proxy}`.")
+                logger.info(
+                    f"Downloading the SGMNet model with `{cmd_wo_proxy}`."
+                )
                 try:
                     subprocess.run(cmd_wo_proxy, check=True)
                 except subprocess.CalledProcessError as e:
@@ -73,7 +82,10 @@ class SGMNet(BaseModel):
         self.net = SGM_Model(config)
         checkpoint = torch.load(sgmnet_weights, map_location="cpu")
         # for ddp model
-        if list(checkpoint["state_dict"].items())[0][0].split(".")[0] == "module":
+        if (
+            list(checkpoint["state_dict"].items())[0][0].split(".")[0]
+            == "module"
+        ):
             new_stat_dict = OrderedDict()
             for key, value in checkpoint["state_dict"].items():
                 new_stat_dict[key[7:]] = value
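The condition being reflowed detects checkpoints saved from a DistributedDataParallel wrapper, whose keys all carry a `module.` prefix that must be stripped (hence `key[7:]`) before loading into the bare model; a tiny sketch with a hypothetical state dict:

from collections import OrderedDict

state_dict = OrderedDict(
    [("module.conv1.weight", 0), ("module.fc.bias", 1)]  # hypothetical
)
if next(iter(state_dict)).split(".")[0] == "module":
    # drop the 7-character "module." prefix from every key
    state_dict = OrderedDict((k[7:], v) for k, v in state_dict.items())
print(list(state_dict))  # ['conv1.weight', 'fc.bias']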
hloc/matchers/sold2.py CHANGED
@@ -35,6 +35,7 @@ class SOLD2(BaseModel):
         "image0",
         "image1",
     ]
+
     # Initialize the line matcher
     def _init(self, conf):
         checkpoint_path = conf["checkpoint_dir"] / conf["weights"]
hloc/pipelines/4Seasons/localize.py CHANGED
@@ -67,7 +67,9 @@ delete_unused_images(seq_images, timestamps)
 generate_query_lists(timestamps, seq_dir, query_list)

 # Generate the localization pairs from the given reference frames.
-generate_localization_pairs(sequence, reloc, num_loc_pairs, ref_pairs, loc_pairs)
+generate_localization_pairs(
+    sequence, reloc, num_loc_pairs, ref_pairs, loc_pairs
+)

 # Extract, match, amd localize.
 ffile = extract_features.main(fconf, seq_images, output_dir)
hloc/pipelines/4Seasons/utils.py CHANGED
@@ -48,7 +48,11 @@ def camera_from_calibration_file(id_, path):
     model_name = "PINHOLE"
     params = [float(i) for i in [fx, fy, cx, cy]]
     camera = Camera(
-        id=id_, model=model_name, width=int(width), height=int(height), params=params
+        id=id_,
+        model=model_name,
+        width=int(width),
+        height=int(height),
+        params=params,
     )
     return camera

@@ -149,7 +153,9 @@ def generate_localization_pairs(sequence, reloc, num, ref_pairs, out_path):
     """
     if "test" in sequence:
         # hard pairs will be overwritten by easy ones if available
-        relocs = [str(reloc).replace("*", d) for d in ["hard", "moderate", "easy"]]
+        relocs = [
+            str(reloc).replace("*", d) for d in ["hard", "moderate", "easy"]
+        ]
     else:
         relocs = [reloc]
     query_to_ref_ts = {}
@@ -207,8 +213,12 @@ def evaluate_submission(submission_dir, relocs, ths=[0.1, 0.2, 0.5]):
     """Compute the relocalization recall from predicted and ground truth poses."""
     for reloc in relocs.parent.glob(relocs.name):
         poses_gt = parse_relocalization(reloc, has_poses=True)
-        poses_pred = parse_relocalization(submission_dir / reloc.name, has_poses=True)
-        poses_pred = {(ref_ts, q_ts): (R, t) for ref_ts, q_ts, R, t in poses_pred}
+        poses_pred = parse_relocalization(
+            submission_dir / reloc.name, has_poses=True
+        )
+        poses_pred = {
+            (ref_ts, q_ts): (R, t) for ref_ts, q_ts, R, t in poses_pred
+        }

         error = []
         for ref_ts, q_ts, R_gt, t_gt in poses_gt:
hloc/pipelines/7Scenes/create_gt_sfm.py CHANGED
@@ -28,7 +28,9 @@ def interpolate_depth(depth, kp):

     # To maximize the number of points that have depth:
     # do bilinear interpolation first and then nearest for the remaining points
-    interp_lin = grid_sample(depth, kp, align_corners=True, mode="bilinear")[0, :, 0]
+    interp_lin = grid_sample(depth, kp, align_corners=True, mode="bilinear")[
+        0, :, 0
+    ]
     interp_nn = torch.nn.functional.grid_sample(
         depth, kp, align_corners=True, mode="nearest"
     )[0, :, 0]
@@ -127,7 +129,15 @@ if __name__ == "__main__":
     dataset = Path("datasets/7scenes")
     outputs = Path("outputs/7Scenes")

-    SCENES = ["chess", "fire", "heads", "office", "pumpkin", "redkitchen", "stairs"]
+    SCENES = [
+        "chess",
+        "fire",
+        "heads",
+        "office",
+        "pumpkin",
+        "redkitchen",
+        "stairs",
+    ]
     for scene in SCENES:
         sfm_path = outputs / scene / "sfm_superpoint+superglue"
         depth_path = dataset / f"depth/7scenes_{scene}/train/depth"
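The comment in the first hunk describes a fallback worth spelling out: bilinear sampling returns an invalid value whenever any of the four neighbouring depth pixels is missing, so those points are re-sampled with nearest-neighbour interpolation. A sketch under that assumption (invalid depth encoded as 0, data hypothetical):

import torch
from torch.nn.functional import grid_sample

depth = torch.rand(1, 1, 480, 640)     # 1 x 1 x H x W, zeros where invalid
kp = torch.rand(1, 1, 100, 2) * 2 - 1  # keypoints in [-1, 1] grid coords

lin = grid_sample(depth, kp, align_corners=True, mode="bilinear")[0, :, 0]
near = grid_sample(depth, kp, align_corners=True, mode="nearest")[0, :, 0]
interp = torch.where(lin > 0, lin, near)  # keep bilinear where it is valid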
hloc/pipelines/7Scenes/pipeline.py CHANGED
@@ -45,7 +45,9 @@ def run_scene(
     create_reference_sfm(gt_dir, ref_sfm_sift, test_list)
     create_query_list_with_intrinsics(gt_dir, query_list, test_list)

-    features = extract_features.main(feature_conf, images, outputs, as_half=True)
+    features = extract_features.main(
+        feature_conf, images, outputs, as_half=True
+    )

     sfm_pairs = outputs / f"pairs-db-covis{num_covis}.txt"
     pairs_from_covisibility.main(ref_sfm_sift, sfm_pairs, num_matched=num_covis)
@@ -112,7 +114,9 @@ if __name__ == "__main__":
         results = (
             args.outputs
             / scene
-            / "results_{}.txt".format("dense" if args.use_dense_depth else "sparse")
+            / "results_{}.txt".format(
+                "dense" if args.use_dense_depth else "sparse"
+            )
         )
         if args.overwrite or not results.exists():
             run_scene(
hloc/pipelines/Aachen/pipeline.py CHANGED
@@ -40,14 +40,18 @@ images = dataset / "images/images_upright/"

 outputs = args.outputs  # where everything will be saved
 sift_sfm = outputs / "sfm_sift"  # from which we extract the reference poses
-reference_sfm = outputs / "sfm_superpoint+superglue"  # the SfM model we will build
+reference_sfm = (
+    outputs / "sfm_superpoint+superglue"
+)  # the SfM model we will build
 sfm_pairs = (
     outputs / f"pairs-db-covis{args.num_covis}.txt"
 )  # top-k most covisible in SIFT model
 loc_pairs = (
     outputs / f"pairs-query-netvlad{args.num_loc}.txt"
 )  # top-k retrieved by NetVLAD
-results = outputs / f"Aachen_hloc_superpoint+superglue_netvlad{args.num_loc}.txt"
+results = (
+    outputs / f"Aachen_hloc_superpoint+superglue_netvlad{args.num_loc}.txt"
+)

 # list the standard configurations available
 print(f"Configs for feature extractors:\n{pformat(extract_features.confs)}")
@@ -71,7 +75,9 @@ sfm_matches = match_features.main(
     matcher_conf, sfm_pairs, feature_conf["output"], outputs
 )

-triangulation.main(reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches)
+triangulation.main(
+    reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches
+)

 global_descriptors = extract_features.main(retrieval_conf, images, outputs)
 pairs_from_retrieval.main(
hloc/pipelines/Aachen_v1_1/pipeline.py CHANGED
@@ -39,14 +39,18 @@ images = dataset / "images/images_upright/"
 sift_sfm = dataset / "3D-models/aachen_v_1_1"

 outputs = args.outputs  # where everything will be saved
-reference_sfm = outputs / "sfm_superpoint+superglue"  # the SfM model we will build
+reference_sfm = (
+    outputs / "sfm_superpoint+superglue"
+)  # the SfM model we will build
 sfm_pairs = (
     outputs / f"pairs-db-covis{args.num_covis}.txt"
 )  # top-k most covisible in SIFT model
 loc_pairs = (
     outputs / f"pairs-query-netvlad{args.num_loc}.txt"
 )  # top-k retrieved by NetVLAD
-results = outputs / f"Aachen-v1.1_hloc_superpoint+superglue_netvlad{args.num_loc}.txt"
+results = (
+    outputs / f"Aachen-v1.1_hloc_superpoint+superglue_netvlad{args.num_loc}.txt"
+)

 # list the standard configurations available
 print(f"Configs for feature extractors:\n{pformat(extract_features.confs)}")
@@ -64,7 +68,9 @@ sfm_matches = match_features.main(
     matcher_conf, sfm_pairs, feature_conf["output"], outputs
 )

-triangulation.main(reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches)
+triangulation.main(
+    reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches
+)

 global_descriptors = extract_features.main(retrieval_conf, images, outputs)
 pairs_from_retrieval.main(
hloc/pipelines/Aachen_v1_1/pipeline_loftr.py CHANGED
@@ -61,7 +61,9 @@ features, sfm_matches = match_dense.main(
61
  matcher_conf, sfm_pairs, images, outputs, max_kps=8192, overwrite=False
62
  )
63
 
64
- triangulation.main(reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches)
65
 
66
  global_descriptors = extract_features.main(retrieval_conf, images, outputs)
67
  pairs_from_retrieval.main(
 
61
  matcher_conf, sfm_pairs, images, outputs, max_kps=8192, overwrite=False
62
  )
63
 
64
+ triangulation.main(
65
+ reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches
66
+ )
67
 
68
  global_descriptors = extract_features.main(retrieval_conf, images, outputs)
69
  pairs_from_retrieval.main(
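
Note: in the LoFTR variant, match_dense.main produces keypoints and matches in one pass, with max_kps capping the number of keypoints kept per image — the theme of this commit. A hedged sketch, assuming a "loftr" entry in match_dense.confs; paths and the model directory name are placeholders:

from pathlib import Path
from hloc import match_dense, triangulation

images = Path("datasets/aachen_v1.1/images")    # placeholder
sift_sfm = Path("datasets/aachen_v1.1/3D-models/aachen_v_1_1")  # placeholder
outputs = Path("outputs/aachen_v1.1")           # placeholder
sfm_pairs = outputs / "pairs-db-covis20.txt"
reference_sfm = outputs / "sfm_loftr"           # placeholder model name

matcher_conf = match_dense.confs["loftr"]       # assumed conf name
features, sfm_matches = match_dense.main(
    matcher_conf, sfm_pairs, images, outputs, max_kps=8192, overwrite=False
)
triangulation.main(
    reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches
)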
hloc/pipelines/CMU/pipeline.py CHANGED
@@ -46,20 +46,34 @@ def run_slice(slice_, root, outputs, num_covis, num_loc):
46
  matcher_conf = match_features.confs["superglue"]
47
 
48
  pairs_from_covisibility.main(sift_sfm, sfm_pairs, num_matched=num_covis)
49
- features = extract_features.main(feature_conf, ref_images, outputs, as_half=True)
50
  sfm_matches = match_features.main(
51
  matcher_conf, sfm_pairs, feature_conf["output"], outputs
52
  )
53
- triangulation.main(ref_sfm, sift_sfm, ref_images, sfm_pairs, features, sfm_matches)
54
 
55
  generate_query_list(root, query_list, slice_)
56
- global_descriptors = extract_features.main(retrieval_conf, ref_images, outputs)
57
- global_descriptors = extract_features.main(retrieval_conf, query_images, outputs)
58
  pairs_from_retrieval.main(
59
- global_descriptors, loc_pairs, num_loc, query_list=query_list, db_model=ref_sfm
60
  )
61
 
62
- features = extract_features.main(feature_conf, query_images, outputs, as_half=True)
63
  loc_matches = match_features.main(
64
  matcher_conf, loc_pairs, feature_conf["output"], outputs
65
  )
@@ -122,5 +136,9 @@ if __name__ == "__main__":
122
  for slice_ in slices:
123
  logger.info("Working on slice %s.", slice_)
124
  run_slice(
125
- f"slice{slice_}", args.dataset, args.outputs, args.num_covis, args.num_loc
126
  )
 
46
  matcher_conf = match_features.confs["superglue"]
47
 
48
  pairs_from_covisibility.main(sift_sfm, sfm_pairs, num_matched=num_covis)
49
+ features = extract_features.main(
50
+ feature_conf, ref_images, outputs, as_half=True
51
+ )
52
  sfm_matches = match_features.main(
53
  matcher_conf, sfm_pairs, feature_conf["output"], outputs
54
  )
55
+ triangulation.main(
56
+ ref_sfm, sift_sfm, ref_images, sfm_pairs, features, sfm_matches
57
+ )
58
 
59
  generate_query_list(root, query_list, slice_)
60
+ global_descriptors = extract_features.main(
61
+ retrieval_conf, ref_images, outputs
62
+ )
63
+ global_descriptors = extract_features.main(
64
+ retrieval_conf, query_images, outputs
65
+ )
66
  pairs_from_retrieval.main(
67
+ global_descriptors,
68
+ loc_pairs,
69
+ num_loc,
70
+ query_list=query_list,
71
+ db_model=ref_sfm,
72
  )
73
 
74
+ features = extract_features.main(
75
+ feature_conf, query_images, outputs, as_half=True
76
+ )
77
  loc_matches = match_features.main(
78
  matcher_conf, loc_pairs, feature_conf["output"], outputs
79
  )
 
136
  for slice_ in slices:
137
  logger.info("Working on slice %s.", slice_)
138
  run_slice(
139
+ f"slice{slice_}",
140
+ args.dataset,
141
+ args.outputs,
142
+ args.num_covis,
143
+ args.num_loc,
144
  )
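
Note: run_slice keeps its five-argument signature after the re-wrap. A sketch of driving it directly (module path taken from the file header above; slice ids and directories are examples):

from pathlib import Path
from hloc.pipelines.CMU.pipeline import run_slice

dataset = Path("datasets/cmu_extended")   # placeholder
outputs = Path("outputs/cmu")             # placeholder
for slice_ in [2, 3, 4]:                  # example slice ids
    run_slice(
        f"slice{slice_}",
        dataset,
        outputs,
        20,   # num_covis
        10,   # num_loc
    )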
hloc/pipelines/Cambridge/pipeline.py CHANGED
@@ -5,7 +5,13 @@ from .utils import create_query_list_with_intrinsics, scale_sfm_images, evaluate
5
  from ... import extract_features, match_features, pairs_from_covisibility
6
  from ... import triangulation, localize_sfm, pairs_from_retrieval, logger
7
 
8
- SCENES = ["KingsCollege", "OldHospital", "ShopFacade", "StMarysChurch", "GreatCourt"]
9
 
10
 
11
  def run_scene(images, gt_dir, outputs, results, num_covis, num_loc):
@@ -35,7 +41,11 @@ def run_scene(images, gt_dir, outputs, results, num_covis, num_loc):
35
  retrieval_conf = extract_features.confs["netvlad"]
36
 
37
  create_query_list_with_intrinsics(
38
- gt_dir / "empty_all", query_list, test_list, ext=".txt", image_dir=images
39
  )
40
  with open(test_list, "r") as f:
41
  query_seqs = {q.split("/")[0] for q in f.read().rstrip().split("\n")}
@@ -49,7 +59,9 @@ def run_scene(images, gt_dir, outputs, results, num_covis, num_loc):
49
  query_prefix=query_seqs,
50
  )
51
 
52
- features = extract_features.main(feature_conf, images, outputs, as_half=True)
53
  pairs_from_covisibility.main(ref_sfm_sift, sfm_pairs, num_matched=num_covis)
54
  sfm_matches = match_features.main(
55
  matcher_conf, sfm_pairs, feature_conf["output"], outputs
 
5
  from ... import extract_features, match_features, pairs_from_covisibility
6
  from ... import triangulation, localize_sfm, pairs_from_retrieval, logger
7
 
8
+ SCENES = [
9
+ "KingsCollege",
10
+ "OldHospital",
11
+ "ShopFacade",
12
+ "StMarysChurch",
13
+ "GreatCourt",
14
+ ]
15
 
16
 
17
  def run_scene(images, gt_dir, outputs, results, num_covis, num_loc):
 
41
  retrieval_conf = extract_features.confs["netvlad"]
42
 
43
  create_query_list_with_intrinsics(
44
+ gt_dir / "empty_all",
45
+ query_list,
46
+ test_list,
47
+ ext=".txt",
48
+ image_dir=images,
49
  )
50
  with open(test_list, "r") as f:
51
  query_seqs = {q.split("/")[0] for q in f.read().rstrip().split("\n")}
 
59
  query_prefix=query_seqs,
60
  )
61
 
62
+ features = extract_features.main(
63
+ feature_conf, images, outputs, as_half=True
64
+ )
65
  pairs_from_covisibility.main(ref_sfm_sift, sfm_pairs, num_matched=num_covis)
66
  sfm_matches = match_features.main(
67
  matcher_conf, sfm_pairs, feature_conf["output"], outputs
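
Note: SCENES now lists one scene per line with unchanged contents. A sketch of the per-scene driver implied by run_scene's signature (module path from the file header above; the gt/results layout below is a placeholder):

from pathlib import Path
from hloc.pipelines.Cambridge.pipeline import SCENES, run_scene

dataset = Path("datasets/cambridge")   # placeholder
outputs = Path("outputs/cambridge")    # placeholder
for scene in SCENES:
    run_scene(
        dataset / scene,                  # images
        dataset / scene / "gt",           # gt_dir (placeholder layout)
        outputs / scene,
        outputs / scene / "results.txt",  # placeholder results file
        20,                               # num_covis
        10,                               # num_loc
    )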
hloc/pipelines/Cambridge/utils.py CHANGED
@@ -42,7 +42,9 @@ def scale_sfm_images(full_model, scaled_model, image_dir):
42
  sy = h / camera.height
43
  assert sx == sy, (sx, sy)
44
  scaled_cameras[cam_id] = camera._replace(
45
- width=w, height=h, params=camera.params * np.array([sx, sx, sy, 1.0])
46
  )
47
 
48
  write_model(scaled_cameras, images, points3D, scaled_model)
 
42
  sy = h / camera.height
43
  assert sx == sy, (sx, sy)
44
  scaled_cameras[cam_id] = camera._replace(
45
+ width=w,
46
+ height=h,
47
+ params=camera.params * np.array([sx, sx, sy, 1.0]),
48
  )
49
 
50
  write_model(scaled_cameras, images, points3D, scaled_model)
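
Note: the multiplier [sx, sx, sy, 1.0] fits a 4-parameter camera such as SIMPLE_RADIAL (f, cx, cy, k): focal length and principal point scale with the image, the distortion coefficient does not. A small numeric check of the rescale above:

import numpy as np

cam_w, cam_h = 1024, 576              # original camera size (example)
w, h = 512, 288                       # resized image (example)
sx, sy = w / cam_w, h / cam_h
assert sx == sy, (sx, sy)             # same guard as in scale_sfm_images
params = np.array([600.0, 512.0, 288.0, 0.01])   # example [f, cx, cy, k]
scaled = params * np.array([sx, sx, sy, 1.0])
# -> f=300.0, cx=256.0, cy=144.0, k=0.01 (distortion untouched)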
hloc/pipelines/RobotCar/colmap_from_nvm.py CHANGED
@@ -16,11 +16,14 @@ from ...utils.read_write_model import write_model
16
  logger = logging.getLogger(__name__)
17
 
18
 
19
- def read_nvm_model(nvm_path, database_path, image_ids, camera_ids, skip_points=False):
20
-
21
  # Extract the intrinsics from the db file instead of the NVM model
22
  db = sqlite3.connect(str(database_path))
23
- ret = db.execute("SELECT camera_id, model, width, height, params FROM cameras;")
24
  cameras = {}
25
  for camera_id, camera_model, width, height, params in ret:
26
  params = np.fromstring(params, dtype=np.double).reshape(-1)
 
16
  logger = logging.getLogger(__name__)
17
 
18
 
19
+ def read_nvm_model(
20
+ nvm_path, database_path, image_ids, camera_ids, skip_points=False
21
+ ):
22
  # Extract the intrinsics from the db file instead of the NVM model
23
  db = sqlite3.connect(str(database_path))
24
+ ret = db.execute(
25
+ "SELECT camera_id, model, width, height, params FROM cameras;"
26
+ )
27
  cameras = {}
28
  for camera_id, camera_model, width, height, params in ret:
29
  params = np.fromstring(params, dtype=np.double).reshape(-1)
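
Note: the intrinsics query itself is unchanged, only re-wrapped. A standalone sketch of the same read; the database path is a placeholder, and np.frombuffer stands in for the deprecated np.fromstring used in the file:

import sqlite3
import numpy as np

db = sqlite3.connect("database.db")   # placeholder COLMAP database
ret = db.execute(
    "SELECT camera_id, model, width, height, params FROM cameras;"
)
cameras = {}
for camera_id, camera_model, width, height, params in ret:
    # params is stored as a raw float64 blob
    params = np.frombuffer(params, dtype=np.double).reshape(-1)
    cameras[camera_id] = (camera_model, width, height, params)
db.close()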
hloc/pipelines/RobotCar/pipeline.py CHANGED
@@ -79,7 +79,9 @@ sift_sfm = outputs / "sfm_sift"
79
  reference_sfm = outputs / "sfm_superpoint+superglue"
80
  sfm_pairs = outputs / f"pairs-db-covis{args.num_covis}.txt"
81
  loc_pairs = outputs / f"pairs-query-netvlad{args.num_loc}.txt"
82
- results = outputs / f"RobotCar_hloc_superpoint+superglue_netvlad{args.num_loc}.txt"
83
 
84
  # pick one of the configurations for extraction and matching
85
  retrieval_conf = extract_features.confs["netvlad"]
@@ -103,7 +105,9 @@ sfm_matches = match_features.main(
103
  matcher_conf, sfm_pairs, feature_conf["output"], outputs
104
  )
105
 
106
- triangulation.main(reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches)
107
 
108
  global_descriptors = extract_features.main(retrieval_conf, images, outputs)
109
  # TODO: do per location and per camera
 
79
  reference_sfm = outputs / "sfm_superpoint+superglue"
80
  sfm_pairs = outputs / f"pairs-db-covis{args.num_covis}.txt"
81
  loc_pairs = outputs / f"pairs-query-netvlad{args.num_loc}.txt"
82
+ results = (
83
+ outputs / f"RobotCar_hloc_superpoint+superglue_netvlad{args.num_loc}.txt"
84
+ )
85
 
86
  # pick one of the configurations for extraction and matching
87
  retrieval_conf = extract_features.confs["netvlad"]
 
105
  matcher_conf, sfm_pairs, feature_conf["output"], outputs
106
  )
107
 
108
+ triangulation.main(
109
+ reference_sfm, sift_sfm, images, sfm_pairs, features, sfm_matches
110
+ )
111
 
112
  global_descriptors = extract_features.main(retrieval_conf, images, outputs)
113
  # TODO: do per location and per camera
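
Note: the retrieval step follows the same pattern across these pipelines. A hedged sketch of the NetVLAD pairing calls, using only signatures visible in this commit; every path below is a placeholder and the query list is hypothetical:

from pathlib import Path
from hloc import extract_features, pairs_from_retrieval

images = Path("datasets/robotcar/images")   # placeholder
outputs = Path("outputs/robotcar")          # placeholder
reference_sfm = outputs / "sfm_superpoint+superglue"
num_loc = 20
loc_pairs = outputs / f"pairs-query-netvlad{num_loc}.txt"
query_list = outputs / "queries.txt"        # hypothetical query list

retrieval_conf = extract_features.confs["netvlad"]
global_descriptors = extract_features.main(retrieval_conf, images, outputs)
pairs_from_retrieval.main(
    global_descriptors,
    loc_pairs,
    num_loc,
    query_list=query_list,
    db_model=reference_sfm,
)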
hloc/utils/database.py CHANGED
@@ -100,7 +100,9 @@ CREATE_MATCHES_TABLE = """CREATE TABLE IF NOT EXISTS matches (
100
  cols INTEGER NOT NULL,
101
  data BLOB)"""
102
 
103
- CREATE_NAME_INDEX = "CREATE UNIQUE INDEX IF NOT EXISTS index_name ON images(name)"
104
 
105
  CREATE_ALL = "; ".join(
106
  [
@@ -150,20 +152,34 @@ class COLMAPDatabase(sqlite3.Connection):
150
  super(COLMAPDatabase, self).__init__(*args, **kwargs)
151
 
152
  self.create_tables = lambda: self.executescript(CREATE_ALL)
153
- self.create_cameras_table = lambda: self.executescript(CREATE_CAMERAS_TABLE)
154
  self.create_descriptors_table = lambda: self.executescript(
155
  CREATE_DESCRIPTORS_TABLE
156
  )
157
- self.create_images_table = lambda: self.executescript(CREATE_IMAGES_TABLE)
158
  self.create_two_view_geometries_table = lambda: self.executescript(
159
  CREATE_TWO_VIEW_GEOMETRIES_TABLE
160
  )
161
- self.create_keypoints_table = lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
162
- self.create_matches_table = lambda: self.executescript(CREATE_MATCHES_TABLE)
163
  self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)
164
 
165
  def add_camera(
166
- self, model, width, height, params, prior_focal_length=False, camera_id=None
167
  ):
168
  params = np.asarray(params, np.float64)
169
  cursor = self.execute(
@@ -298,7 +314,12 @@ def example_usage():
298
 
299
  # Create dummy cameras.
300
 
301
- model1, width1, height1, params1 = 0, 1024, 768, np.array((1024.0, 512.0, 384.0))
302
  model2, width2, height2, params2 = (
303
  2,
304
  1024,
 
100
  cols INTEGER NOT NULL,
101
  data BLOB)"""
102
 
103
+ CREATE_NAME_INDEX = (
104
+ "CREATE UNIQUE INDEX IF NOT EXISTS index_name ON images(name)"
105
+ )
106
 
107
  CREATE_ALL = "; ".join(
108
  [
 
152
  super(COLMAPDatabase, self).__init__(*args, **kwargs)
153
 
154
  self.create_tables = lambda: self.executescript(CREATE_ALL)
155
+ self.create_cameras_table = lambda: self.executescript(
156
+ CREATE_CAMERAS_TABLE
157
+ )
158
  self.create_descriptors_table = lambda: self.executescript(
159
  CREATE_DESCRIPTORS_TABLE
160
  )
161
+ self.create_images_table = lambda: self.executescript(
162
+ CREATE_IMAGES_TABLE
163
+ )
164
  self.create_two_view_geometries_table = lambda: self.executescript(
165
  CREATE_TWO_VIEW_GEOMETRIES_TABLE
166
  )
167
+ self.create_keypoints_table = lambda: self.executescript(
168
+ CREATE_KEYPOINTS_TABLE
169
+ )
170
+ self.create_matches_table = lambda: self.executescript(
171
+ CREATE_MATCHES_TABLE
172
+ )
173
  self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)
174
 
175
  def add_camera(
176
+ self,
177
+ model,
178
+ width,
179
+ height,
180
+ params,
181
+ prior_focal_length=False,
182
+ camera_id=None,
183
  ):
184
  params = np.asarray(params, np.float64)
185
  cursor = self.execute(
 
314
 
315
  # Create dummy cameras.
316
 
317
+ model1, width1, height1, params1 = (
318
+ 0,
319
+ 1024,
320
+ 768,
321
+ np.array((1024.0, 512.0, 384.0)),
322
+ )
323
  model2, width2, height2, params2 = (
324
  2,
325
  1024,
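
Note: the table-creation lambdas above only gain line breaks. A usage sketch; COLMAPDatabase.connect is the standard factory in COLMAP's database script and is assumed unchanged here, and the dummy camera mirrors example_usage (model 0 = SIMPLE_PINHOLE):

import numpy as np
from hloc.utils.database import COLMAPDatabase

db = COLMAPDatabase.connect("example.db")   # placeholder path
db.create_tables()          # runs CREATE_ALL, including CREATE_NAME_INDEX
camera_id = db.add_camera(
    model=0,                # SIMPLE_PINHOLE
    width=1024,
    height=768,
    params=np.array((1024.0, 512.0, 384.0)),   # f, cx, cy
)
db.commit()
db.close()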
hloc/utils/geometry.py CHANGED
@@ -16,12 +16,12 @@ def compute_epipolar_errors(qvec_r2t, tvec_r2t, p2d_r, p2d_t):
16
  E = vector_to_cross_product_matrix(T_r2t[:3, -1]) @ T_r2t[:3, :3]
17
  l2d_r2t = (E @ to_homogeneous(p2d_r).T).T
18
  l2d_t2r = (E.T @ to_homogeneous(p2d_t).T).T
19
- errors_r = np.abs(np.sum(to_homogeneous(p2d_r) * l2d_t2r, axis=1)) / np.linalg.norm(
20
- l2d_t2r[:, :2], axis=1
21
- )
22
- errors_t = np.abs(np.sum(to_homogeneous(p2d_t) * l2d_r2t, axis=1)) / np.linalg.norm(
23
- l2d_r2t[:, :2], axis=1
24
- )
25
  return E, errors_r, errors_t
26
 
27
 
 
16
  E = vector_to_cross_product_matrix(T_r2t[:3, -1]) @ T_r2t[:3, :3]
17
  l2d_r2t = (E @ to_homogeneous(p2d_r).T).T
18
  l2d_t2r = (E.T @ to_homogeneous(p2d_t).T).T
19
+ errors_r = np.abs(
20
+ np.sum(to_homogeneous(p2d_r) * l2d_t2r, axis=1)
21
+ ) / np.linalg.norm(l2d_t2r[:, :2], axis=1)
22
+ errors_t = np.abs(
23
+ np.sum(to_homogeneous(p2d_t) * l2d_r2t, axis=1)
24
+ ) / np.linalg.norm(l2d_r2t[:, :2], axis=1)
25
  return E, errors_r, errors_t
26
 
27
 
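
Note: the quantity being re-wrapped is the point-to-epipolar-line distance |x_t^T (E x_r)| / ||(E x_r)[:2]||. A tiny self-contained check with a pure x-translation, where epipolar lines are horizontal and the error reduces to the y-offset:

import numpy as np

def to_homogeneous(p):   # (N, 2) -> (N, 3), as assumed by the code above
    return np.concatenate([p, np.ones((p.shape[0], 1))], axis=1)

# Essential matrix for R = I, t = (1, 0, 0): E = [t]x
E = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, -1.0],
              [0.0, 1.0, 0.0]])
p2d_r = np.array([[0.1, 0.2]])    # reference keypoint (normalized coords)
p2d_t = np.array([[0.1, 0.25]])   # target keypoint, 0.05 off the epipolar line

l2d_r2t = (E @ to_homogeneous(p2d_r).T).T
errors_t = np.abs(
    np.sum(to_homogeneous(p2d_t) * l2d_r2t, axis=1)
) / np.linalg.norm(l2d_r2t[:, :2], axis=1)
print(errors_t)   # [0.05]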
hloc/utils/read_write_model.py CHANGED
@@ -42,7 +42,9 @@ logger = logging.getLogger(__name__)
42
  CameraModel = collections.namedtuple(
43
  "CameraModel", ["model_id", "model_name", "num_params"]
44
  )
45
- Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"])
46
  BaseImage = collections.namedtuple(
47
  "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]
48
  )
@@ -126,7 +128,11 @@ def read_cameras_text(path):
126
  height = int(elems[3])
127
  params = np.array(tuple(map(float, elems[4:])))
128
  cameras[camera_id] = Camera(
129
- id=camera_id, model=model, width=width, height=height, params=params
130
  )
131
  return cameras
132
 
@@ -151,7 +157,9 @@ def read_cameras_binary(path_to_model_file):
151
  height = camera_properties[3]
152
  num_params = CAMERA_MODEL_IDS[model_id].num_params
153
  params = read_next_bytes(
154
- fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params
155
  )
156
  cameras[camera_id] = Camera(
157
  id=camera_id,
@@ -222,7 +230,10 @@ def read_images_text(path):
222
  image_name = elems[9]
223
  elems = fid.readline().split()
224
  xys = np.column_stack(
225
- [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))]
226
  )
227
  point3D_ids = np.array(tuple(map(int, elems[2::3])))
228
  images[image_id] = Image(
@@ -259,16 +270,19 @@ def read_images_binary(path_to_model_file):
259
  while current_char != b"\x00": # look for the ASCII 0 entry
260
  image_name += current_char.decode("utf-8")
261
  current_char = read_next_bytes(fid, 1, "c")[0]
262
- num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
263
- 0
264
- ]
265
  x_y_id_s = read_next_bytes(
266
  fid,
267
  num_bytes=24 * num_points2D,
268
  format_char_sequence="ddq" * num_points2D,
269
  )
270
  xys = np.column_stack(
271
- [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))]
272
  )
273
  point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
274
  images[image_id] = Image(
@@ -307,7 +321,13 @@ def write_images_text(images, path):
307
  with open(path, "w") as fid:
308
  fid.write(HEADER)
309
  for _, img in images.items():
310
- image_header = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name]
311
  first_line = " ".join(map(str, image_header))
312
  fid.write(first_line + "\n")
313
 
@@ -387,9 +407,9 @@ def read_points3D_binary(path_to_model_file):
387
  xyz = np.array(binary_point_line_properties[1:4])
388
  rgb = np.array(binary_point_line_properties[4:7])
389
  error = np.array(binary_point_line_properties[7])
390
- track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
391
- 0
392
- ]
393
  track_elems = read_next_bytes(
394
  fid,
395
  num_bytes=8 * track_length,
@@ -478,8 +498,12 @@ def read_model(path, ext=""):
478
  ext = ".txt"
479
  else:
480
  try:
481
- cameras, images, points3D = read_model(os.path.join(path, "model/"))
482
- logger.warning("This SfM file structure was deprecated in hloc v1.1")
483
  return cameras, images, points3D
484
  except FileNotFoundError:
485
  raise FileNotFoundError(
@@ -571,7 +595,9 @@ def main():
571
  )
572
  args = parser.parse_args()
573
 
574
- cameras, images, points3D = read_model(path=args.input_model, ext=args.input_format)
575
 
576
  print("num_cameras:", len(cameras))
577
  print("num_images:", len(images))
@@ -579,7 +605,11 @@ def main():
579
 
580
  if args.output_model is not None:
581
  write_model(
582
- cameras, images, points3D, path=args.output_model, ext=args.output_format
583
  )
584
 
585
 
 
42
  CameraModel = collections.namedtuple(
43
  "CameraModel", ["model_id", "model_name", "num_params"]
44
  )
45
+ Camera = collections.namedtuple(
46
+ "Camera", ["id", "model", "width", "height", "params"]
47
+ )
48
  BaseImage = collections.namedtuple(
49
  "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]
50
  )
 
128
  height = int(elems[3])
129
  params = np.array(tuple(map(float, elems[4:])))
130
  cameras[camera_id] = Camera(
131
+ id=camera_id,
132
+ model=model,
133
+ width=width,
134
+ height=height,
135
+ params=params,
136
  )
137
  return cameras
138
 
 
157
  height = camera_properties[3]
158
  num_params = CAMERA_MODEL_IDS[model_id].num_params
159
  params = read_next_bytes(
160
+ fid,
161
+ num_bytes=8 * num_params,
162
+ format_char_sequence="d" * num_params,
163
  )
164
  cameras[camera_id] = Camera(
165
  id=camera_id,
 
230
  image_name = elems[9]
231
  elems = fid.readline().split()
232
  xys = np.column_stack(
233
+ [
234
+ tuple(map(float, elems[0::3])),
235
+ tuple(map(float, elems[1::3])),
236
+ ]
237
  )
238
  point3D_ids = np.array(tuple(map(int, elems[2::3])))
239
  images[image_id] = Image(
 
270
  while current_char != b"\x00": # look for the ASCII 0 entry
271
  image_name += current_char.decode("utf-8")
272
  current_char = read_next_bytes(fid, 1, "c")[0]
273
+ num_points2D = read_next_bytes(
274
+ fid, num_bytes=8, format_char_sequence="Q"
275
+ )[0]
276
  x_y_id_s = read_next_bytes(
277
  fid,
278
  num_bytes=24 * num_points2D,
279
  format_char_sequence="ddq" * num_points2D,
280
  )
281
  xys = np.column_stack(
282
+ [
283
+ tuple(map(float, x_y_id_s[0::3])),
284
+ tuple(map(float, x_y_id_s[1::3])),
285
+ ]
286
  )
287
  point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
288
  images[image_id] = Image(
 
321
  with open(path, "w") as fid:
322
  fid.write(HEADER)
323
  for _, img in images.items():
324
+ image_header = [
325
+ img.id,
326
+ *img.qvec,
327
+ *img.tvec,
328
+ img.camera_id,
329
+ img.name,
330
+ ]
331
  first_line = " ".join(map(str, image_header))
332
  fid.write(first_line + "\n")
333
 
 
407
  xyz = np.array(binary_point_line_properties[1:4])
408
  rgb = np.array(binary_point_line_properties[4:7])
409
  error = np.array(binary_point_line_properties[7])
410
+ track_length = read_next_bytes(
411
+ fid, num_bytes=8, format_char_sequence="Q"
412
+ )[0]
413
  track_elems = read_next_bytes(
414
  fid,
415
  num_bytes=8 * track_length,
 
498
  ext = ".txt"
499
  else:
500
  try:
501
+ cameras, images, points3D = read_model(
502
+ os.path.join(path, "model/")
503
+ )
504
+ logger.warning(
505
+ "This SfM file structure was deprecated in hloc v1.1"
506
+ )
507
  return cameras, images, points3D
508
  except FileNotFoundError:
509
  raise FileNotFoundError(
 
595
  )
596
  args = parser.parse_args()
597
 
598
+ cameras, images, points3D = read_model(
599
+ path=args.input_model, ext=args.input_format
600
+ )
601
 
602
  print("num_cameras:", len(cameras))
603
  print("num_images:", len(images))
 
605
 
606
  if args.output_model is not None:
607
  write_model(
608
+ cameras,
609
+ images,
610
+ points3D,
611
+ path=args.output_model,
612
+ ext=args.output_format,
613
  )
614
 
615
 
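
Note: read_model and write_model keep their signatures; the CLI at the bottom is only re-wrapped. A hedged sketch of the same round trip as a library call (paths and formats are placeholders):

from hloc.utils.read_write_model import read_model, write_model

cameras, images, points3D = read_model(
    path="outputs/sfm_superpoint+superglue", ext=".bin"
)
print("num_cameras:", len(cameras))
print("num_images:", len(images))
print("num_points3D:", len(points3D))
write_model(
    cameras,
    images,
    points3D,
    path="outputs/sfm_as_text",   # placeholder
    ext=".txt",
)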
hloc/utils/viz.py CHANGED
@@ -19,7 +19,9 @@ def cm_RdGn(x):
19
  return np.clip(c, 0, 1)
20
 
21
 
22
- def plot_images(imgs, titles=None, cmaps="gray", dpi=100, pad=0.5, adaptive=True):
23
  """Plot a set of images horizontally.
24
  Args:
25
  imgs: a list of NumPy or PyTorch images, RGB (H, W, 3) or mono (H, W).
@@ -129,7 +131,13 @@ def add_text(
129
  ):
130
  ax = plt.gcf().axes[idx]
131
  t = ax.text(
132
- *pos, text, fontsize=fs, ha=ha, va=va, color=color, transform=ax.transAxes
133
  )
134
  if lcolor is not None:
135
  t.set_path_effects(
 
19
  return np.clip(c, 0, 1)
20
 
21
 
22
+ def plot_images(
23
+ imgs, titles=None, cmaps="gray", dpi=100, pad=0.5, adaptive=True
24
+ ):
25
  """Plot a set of images horizontally.
26
  Args:
27
  imgs: a list of NumPy or PyTorch images, RGB (H, W, 3) or mono (H, W).
 
131
  ):
132
  ax = plt.gcf().axes[idx]
133
  t = ax.text(
134
+ *pos,
135
+ text,
136
+ fontsize=fs,
137
+ ha=ha,
138
+ va=va,
139
+ color=color,
140
+ transform=ax.transAxes
141
  )
142
  if lcolor is not None:
143
  t.set_path_effects(
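
Note: plot_images and add_text change only in argument wrapping. A sketch with synthetic images, assuming the usual (idx, text, ...) ordering that the function body above implies:

import numpy as np
import matplotlib.pyplot as plt
from hloc.utils.viz import plot_images, add_text

imgs = [np.random.rand(240, 320, 3), np.random.rand(240, 320, 3)]
plot_images(imgs, titles=["reference", "target"], dpi=100, pad=0.5)
add_text(0, "synthetic pair")   # annotate the first axis
plt.savefig("pair.png")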
hloc/utils/viz_3d.py CHANGED
@@ -46,7 +46,9 @@ def init_figure(height: int = 800) -> go.Figure:
46
  dragmode="orbit",
47
  ),
48
  margin=dict(l=0, r=0, b=0, t=0, pad=0),
49
- legend=dict(orientation="h", yanchor="top", y=0.99, xanchor="left", x=0.1),
50
  )
51
  return fig
52
 
@@ -68,7 +70,9 @@ def plot_points(
68
  mode="markers",
69
  name=name,
70
  legendgroup=name,
71
- marker=dict(size=ps, color=color, line_width=0.0, colorscale=colorscale),
72
  )
73
  fig.add_trace(tr)
74
 
@@ -162,7 +166,9 @@ def plot_camera_colmap(
162
  )
163
 
164
 
165
- def plot_cameras(fig: go.Figure, reconstruction: pycolmap.Reconstruction, **kwargs):
166
  """Plot a camera as a cone with camera frustum."""
167
  for image_id, image in reconstruction.images.items():
168
  plot_camera_colmap(
 
46
  dragmode="orbit",
47
  ),
48
  margin=dict(l=0, r=0, b=0, t=0, pad=0),
49
+ legend=dict(
50
+ orientation="h", yanchor="top", y=0.99, xanchor="left", x=0.1
51
+ ),
52
  )
53
  return fig
54
 
 
70
  mode="markers",
71
  name=name,
72
  legendgroup=name,
73
+ marker=dict(
74
+ size=ps, color=color, line_width=0.0, colorscale=colorscale
75
+ ),
76
  )
77
  fig.add_trace(tr)
78
 
 
166
  )
167
 
168
 
169
+ def plot_cameras(
170
+ fig: go.Figure, reconstruction: pycolmap.Reconstruction, **kwargs
171
+ ):
172
  """Plot a camera as a cone with camera frustum."""
173
  for image_id, image in reconstruction.images.items():
174
  plot_camera_colmap(
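
Note: init_figure, plot_points, and plot_cameras only gain line breaks here. A sketch using the first two on a random cloud; the ps/color/name keywords are taken from the marker dict above, while the color string format is an assumption:

import numpy as np
from hloc.utils import viz_3d

fig = viz_3d.init_figure(height=800)
pts = np.random.uniform(-1.0, 1.0, (500, 3))
viz_3d.plot_points(fig, pts, color="rgba(255,0,0,1)", ps=2, name="demo")
fig.show()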