import pickle
import random
import numpy as np
import pycolmap
from matplotlib import cm
from .utils.io import read_image
from .utils.viz import (
add_text,
cm_RdGn,
plot_images,
plot_keypoints,
plot_matches,
)


def visualize_sfm_2d(
reconstruction,
image_dir,
color_by="visibility",
selected=[],
n=1,
seed=0,
dpi=75,
):
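    """Visualize the 2D keypoints of registered images in a COLMAP reconstruction.

    Keypoints are colored by visibility (triangulated or not), by the track length
    of their 3D point, or by its depth in the camera frame. Unless `selected`
    image ids are given, `n` registered images are sampled with the given seed.
    """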
assert image_dir.exists()
if not isinstance(reconstruction, pycolmap.Reconstruction):
reconstruction = pycolmap.Reconstruction(reconstruction)
if not selected:
image_ids = reconstruction.reg_image_ids()
selected = random.Random(seed).sample(image_ids, min(n, len(image_ids)))
for i in selected:
image = reconstruction.images[i]
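        # gather the detected 2D keypoints and whether each one is triangulated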
keypoints = np.array([p.xy for p in image.points2D])
visible = np.array([p.has_point3D() for p in image.points2D])
if color_by == "visibility":
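            # blue if the keypoint has a 3D point, red otherwise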
color = [(0, 0, 1) if v else (1, 0, 0) for v in visible]
text = f"visible: {np.count_nonzero(visible)}/{len(visible)}"
elif color_by == "track_length":
tl = np.array(
[
(
reconstruction.points3D[p.point3D_id].track.length()
if p.has_point3D()
else 1
)
for p in image.points2D
]
)
max_, med_ = np.max(tl), np.median(tl[tl > 1])
tl = np.log(tl)
color = cm.jet(tl / tl.max()).tolist()
text = f"max/median track length: {max_}/{med_}"
elif color_by == "depth":
p3ids = [p.point3D_id for p in image.points2D if p.has_point3D()]
z = np.array(
[
(image.cam_from_world * reconstruction.points3D[j].xyz)[-1]
for j in p3ids
]
)
z -= z.min()
color = cm.jet(z / np.percentile(z, 99.9))
text = f"visible: {np.count_nonzero(visible)}/{len(visible)}"
keypoints = keypoints[visible]
else:
raise NotImplementedError(f"Coloring not implemented: {color_by}.")
name = image.name
fig = plot_images([read_image(image_dir / name)], dpi=dpi)
plot_keypoints([keypoints], colors=[color], ps=4)
add_text(0, text)
add_text(0, name, pos=(0.01, 0.01), fs=5, lcolor=None, va="bottom")
return fig


def visualize_loc(
results,
image_dir,
reconstruction=None,
db_image_dir=None,
selected=[],
n=1,
seed=0,
prefix=None,
**kwargs,
):
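    """Visualize localization results from the `<results>_logs.pkl` file.

    Unless `selected` query names are given, `n` queries are sampled with the
    given seed, optionally restricted to names starting with `prefix`. Each query
    is plotted against its best-matching database images.
    """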
assert image_dir.exists()
with open(str(results) + "_logs.pkl", "rb") as f:
logs = pickle.load(f)
if not selected:
queries = list(logs["loc"].keys())
if prefix:
queries = [q for q in queries if q.startswith(prefix)]
selected = random.Random(seed).sample(queries, min(n, len(queries)))
if reconstruction is not None:
if not isinstance(reconstruction, pycolmap.Reconstruction):
reconstruction = pycolmap.Reconstruction(reconstruction)
for qname in selected:
loc = logs["loc"][qname]
visualize_loc_from_log(
image_dir, qname, loc, reconstruction, db_image_dir, **kwargs
)


def visualize_loc_from_log(
image_dir,
query_name,
loc,
reconstruction=None,
db_image_dir=None,
top_k_db=2,
dpi=75,
):
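    """Plot the matches between a query and the `top_k_db` database images with
    the most inlier correspondences, as recorded in a single log entry `loc`.
    """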
q_image = read_image(image_dir / query_name)
if loc.get("covisibility_clustering", False):
# select the first, largest cluster if the localization failed
loc = loc["log_clusters"][loc["best_cluster"] or 0]
inliers = np.array(loc["PnP_ret"]["inliers"])
mkp_q = loc["keypoints_query"]
n = len(loc["db"])
if reconstruction is not None:
# for each pair of query keypoint and its matched 3D point,
# we need to find its corresponding keypoint in each database image
# that observes it. We also count the number of inliers in each.
kp_idxs, kp_to_3D_to_db = loc["keypoint_index_to_db"]
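        # per database image: the number of inlier matches, the (query, db)
        # keypoint index pairs, and the inlier flag of each match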
counts = np.zeros(n)
dbs_kp_q_db = [[] for _ in range(n)]
inliers_dbs = [[] for _ in range(n)]
for i, (inl, (p3D_id, db_idxs)) in enumerate(
zip(inliers, kp_to_3D_to_db)
):
track = reconstruction.points3D[p3D_id].track
track = {el.image_id: el.point2D_idx for el in track.elements}
for db_idx in db_idxs:
counts[db_idx] += inl
kp_db = track[loc["db"][db_idx]]
dbs_kp_q_db[db_idx].append((i, kp_db))
inliers_dbs[db_idx].append(inl)
else:
# for inloc the database keypoints are already in the logs
assert "keypoints_db" in loc
assert "indices_db" in loc
counts = np.array(
[np.sum(loc["indices_db"][inliers] == i) for i in range(n)]
)
# display the database images with the most inlier matches
db_sort = np.argsort(-counts)
for db_idx in db_sort[:top_k_db]:
if reconstruction is not None:
db = reconstruction.images[loc["db"][db_idx]]
db_name = db.name
db_kp_q_db = np.array(dbs_kp_q_db[db_idx])
kp_q = mkp_q[db_kp_q_db[:, 0]]
kp_db = np.array([db.points2D[i].xy for i in db_kp_q_db[:, 1]])
inliers_db = inliers_dbs[db_idx]
else:
db_name = loc["db"][db_idx]
kp_q = mkp_q[loc["indices_db"] == db_idx]
kp_db = loc["keypoints_db"][loc["indices_db"] == db_idx]
inliers_db = inliers[loc["indices_db"] == db_idx]
db_image = read_image((db_image_dir or image_dir) / db_name)
color = cm_RdGn(inliers_db).tolist()
text = f"inliers: {sum(inliers_db)}/{len(inliers_db)}"
plot_images([q_image, db_image], dpi=dpi)
plot_matches(kp_q, kp_db, color, a=0.1)
add_text(0, text)
opts = dict(pos=(0.01, 0.01), fs=5, lcolor=None, va="bottom")
add_text(0, query_name, **opts)
add_text(1, db_name, **opts)
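

if __name__ == "__main__":
    # Minimal usage sketch: the paths below are hypothetical placeholders, and the
    # plotting helpers are assumed to draw with matplotlib.
    from pathlib import Path

    import matplotlib.pyplot as plt

    model = Path("outputs/sfm")        # COLMAP model directory (placeholder)
    images = Path("datasets/images")   # directory containing the images (placeholder)

    # plot two random registered images, colored by track length
    visualize_sfm_2d(model, images, color_by="track_length", n=2)
    # plot one random query from the logs written next to the results file
    visualize_loc(Path("outputs/results.txt"), images, reconstruction=model, n=1)

    plt.show()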