hugoycj committed
Commit
6af8c80
1 Parent(s): 78dc292

Remove dependencies on hloc

Drops util/match_extraction.py (hloc-based SuperPoint + LightGlue matching) and the geometry-guided sampling (GGS) branch in app.py, so the demo now always samples with cond_fn = None.

Files changed (2)
  1. app.py +2 -20
  2. util/match_extraction.py +0 -175
app.py CHANGED
@@ -17,7 +17,6 @@ from functools import partial
 import matplotlib.pyplot as plt
 import shutil
 from util.utils import seed_all_random_engines
-from util.match_extraction import extract_match
 from util.load_img_folder import load_and_preprocess_images
 from util.geometry_guided_sampling import geometry_guided_sampling
 from pytorch3d.vis.plotly_vis import get_camera_wireframe
@@ -196,25 +195,8 @@ def estimate_images_pose(image_folder, mode) -> None:
     start_time = time.time()
 
     # Perform match extraction
-    if cfg.GGS.enable:
-        # Optional TODO: remove the keypoints outside the cropped region?
-
-        kp1, kp2, i12 = extract_match(image_folder, image_info)
-
-        keys = ["kp1", "kp2", "i12", "img_shape"]
-        values = [kp1, kp2, i12, images.shape]
-        matches_dict = dict(zip(keys, values))
-
-        cfg.GGS.pose_encoding_type = cfg.MODEL.pose_encoding_type
-        GGS_cfg = OmegaConf.to_container(cfg.GGS)
-
-        cond_fn = partial(
-            geometry_guided_sampling, matches_dict=matches_dict, GGS_cfg=GGS_cfg
-        )
-        print("=====> Sampling with GGS <=====")
-    else:
-        cond_fn = None
-        print("=====> Sampling without GGS <=====")
+    cond_fn = None
+    print("=====> Sampling without GGS <=====")
 
     # Forward
     with torch.no_grad():
util/match_extraction.py DELETED
@@ -1,175 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import os
-import shutil
-import tempfile
-from pathlib import Path
-
-import numpy as np
-import pycolmap
-from typing import Optional, List, Dict, Any
-from hloc import (
-    extract_features,
-    logger,
-    match_features,
-    pairs_from_exhaustive,
-)
-from hloc.triangulation import (
-    import_features,
-    import_matches,
-    estimation_and_geometric_verification,
-    parse_option_args,
-    OutputCapture,
-)
-from hloc.utils.database import (
-    COLMAPDatabase,
-    image_ids_to_pair_id,
-    pair_id_to_image_ids,
-)
-from hloc.reconstruction import create_empty_db, import_images, get_image_ids
-
-
-def extract_match(image_folder_path: str, image_info: Dict):
-    # Now only supports SPSG
-    with tempfile.TemporaryDirectory() as tmpdir:
-        tmp_mapping = os.path.join(tmpdir, "mapping")
-        os.makedirs(tmp_mapping)
-        for filename in os.listdir(image_folder_path):
-            if filename.lower().endswith(
-                (".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff")
-            ):
-                shutil.copy(
-                    os.path.join(image_folder_path, filename),
-                    os.path.join(tmp_mapping, filename),
-                )
-        matches, keypoints = run_hloc(tmpdir)
-
-    # From the format of colmap to PyTorch3D
-    kp1, kp2, i12 = colmap_keypoint_to_pytorch3d(matches, keypoints, image_info)
-
-    return kp1, kp2, i12
-
-
-def colmap_keypoint_to_pytorch3d(matches, keypoints, image_info):
-    kp1, kp2, i12 = [], [], []
-    bbox_xyxy, scale = image_info["bboxes_xyxy"], image_info["resized_scales"]
-
-    for idx in keypoints:
-        # coordinate change from COLMAP to OpenCV
-        cur_keypoint = keypoints[idx] - 0.5
-
-        # go to the coordinate after cropping
-        # use idx - 1 here because the COLMAP format starts from 1 instead of 0
-        cur_keypoint = cur_keypoint - [
-            bbox_xyxy[idx - 1][0],
-            bbox_xyxy[idx - 1][1],
-        ]
-        cur_keypoint = cur_keypoint * scale[idx - 1]
-        keypoints[idx] = cur_keypoint
-
-    for (r_idx, q_idx), pair_match in matches.items():
-        if pair_match is not None:
-            kp1.append(keypoints[r_idx][pair_match[:, 0]])
-            kp2.append(keypoints[q_idx][pair_match[:, 1]])
-
-            i12_pair = np.array([[r_idx - 1, q_idx - 1]])
-            i12.append(np.repeat(i12_pair, len(pair_match), axis=0))
-
-    if kp1:
-        kp1, kp2, i12 = map(np.concatenate, (kp1, kp2, i12), (0, 0, 0))
-    else:
-        kp1 = kp2 = i12 = None
-
-    return kp1, kp2, i12
-
-
-def run_hloc(output_dir: str):
-    # learned from
-    # https://github.com/cvg/Hierarchical-Localization/blob/master/pipeline_SfM.ipynb
-
-    images = Path(output_dir)
-    outputs = Path(os.path.join(output_dir, "output"))
-    sfm_pairs = outputs / "pairs-sfm.txt"
-    sfm_dir = outputs / "sfm"
-    features = outputs / "features.h5"
-    matches = outputs / "matches.h5"
-
-    feature_conf = extract_features.confs[
-        "superpoint_inloc"
-    ]  # or superpoint_max
-    matcher_conf = match_features.confs["superpoint+lightglue"]
-
-    references = [
-        p.relative_to(images).as_posix()
-        for p in (images / "mapping/").iterdir()
-    ]
-
-    extract_features.main(
-        feature_conf, images, image_list=references, feature_path=features
-    )
-    pairs_from_exhaustive.main(sfm_pairs, image_list=references)
-    match_features.main(
-        matcher_conf, sfm_pairs, features=features, matches=matches
-    )
-
-    matches, keypoints = compute_matches_and_keypoints(
-        sfm_dir, images, sfm_pairs, features, matches, image_list=references
-    )
-
-    return matches, keypoints
-
-
-def compute_matches_and_keypoints(
-    sfm_dir: Path,
-    image_dir: Path,
-    pairs: Path,
-    features: Path,
-    matches: Path,
-    camera_mode: pycolmap.CameraMode = pycolmap.CameraMode.AUTO,
-    verbose: bool = False,
-    min_match_score: Optional[float] = None,
-    image_list: Optional[List[str]] = None,
-    image_options: Optional[Dict[str, Any]] = None,
-) -> pycolmap.Reconstruction:
-    # learned from
-    # https://github.com/cvg/Hierarchical-Localization/blob/master/hloc/reconstruction.py
-
-    sfm_dir.mkdir(parents=True, exist_ok=True)
-    database = sfm_dir / "database.db"
-
-    create_empty_db(database)
-    import_images(image_dir, database, camera_mode, image_list, image_options)
-    image_ids = get_image_ids(database)
-    import_features(image_ids, database, features)
-    import_matches(image_ids, database, pairs, matches, min_match_score)
-    estimation_and_geometric_verification(database, pairs, verbose)
-
-    db = COLMAPDatabase.connect(database)
-
-    matches = dict(
-        (
-            pair_id_to_image_ids(pair_id),
-            _blob_to_array_safe(data, np.uint32, (-1, 2)),
-        )
-        for pair_id, data in db.execute("SELECT pair_id, data FROM matches")
-    )
-
-    keypoints = dict(
-        (image_id, _blob_to_array_safe(data, np.float32, (-1, 2)))
-        for image_id, data in db.execute("SELECT image_id, data FROM keypoints")
-    )
-
-    db.close()
-
-    return matches, keypoints
-
-
-def _blob_to_array_safe(blob, dtype, shape=(-1,)):
-    if blob is not None:
-        return np.fromstring(blob, dtype=dtype).reshape(*shape)
-    else:
-        return blob