|
import json
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import open3d
from PIL import Image

from misc.utils import parse_camera_info
from sem_seg_utils import *
from visualize_3d import visualize_wireframe
|
|
class PointCloudReaderPerspective(): |
|
|
|
def __init__(self, path, resolution="full", random_level=0, generate_color=False, generate_normal=False, |
|
generate_segmentation=False): |
|
perspective_str = "perspective" |
|
self.path = path |
|
self.random_level = random_level |
|
self.resolution = resolution |
|
self.generate_color = generate_color |
|
self.generate_normal = generate_normal |
|
self.generate_segmentation = generate_segmentation |
|
sections = sorted([p for p in os.listdir(os.path.join(path, "2D_rendering"))]) |
|
|
|
sections_views = [sorted(os.listdir(os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution]))) \ |
|
if os.path.isdir(os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution])) \ |
|
else [] \ |
|
for p in sections] |
|
|
|
self.depth_paths = [] |
|
self.rgb_paths = [] |
|
self.seg_paths = [] |
|
self.normal_paths = [] |
|
self.pose_paths = [] |
|
for p, views in zip(sections, sections_views): |
|
if not os.path.isdir(os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution])): |
|
continue |
|
|
|
self.depth_paths += [os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution, v, "depth.png"]) for v in views] |
|
self.rgb_paths += [os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution, v, "rgb_rawlight.png"]) for v in views] |
|
self.seg_paths += [os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution, v, "semantic.png"]) for v in views] |
|
self.normal_paths += [os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution, v, "normal.png"]) for v in views] |
|
self.pose_paths += [os.path.join(*[path, "2D_rendering", p, perspective_str, self.resolution, v, "camera_pose.txt"]) for v in views] |
|
|
|
self.point_cloud = self.generate_point_cloud(self.random_level, color=self.generate_color, |
|
normal=self.generate_normal, |
|
seg=self.generate_segmentation) |
|
|
|
|
|
def read_camera_center(self): |
|
camera_centers = [] |
|
print(self.camera_paths) |
|
print(self.depth_paths) |
|
for i in range(len(self.camera_paths)): |
|
with open(self.camera_paths[i], 'r') as f: |
|
line = f.readline() |
|
center = list(map(float, line.strip().split(" "))) |
|
camera_centers.append(np.asarray([center[0], center[1], center[2]])) |
|
print(camera_centers) |
|
return camera_centers |
|
|
|
    def generate_point_cloud(self, random_level=0, color=False, normal=False, seg=False):
        """Back-project every perspective depth view into one global point cloud.

        NOTE(review): despite the keyword defaults, the body reads ``rgb_img``
        and ``seg_labels`` unconditionally further down, so it only runs
        cleanly with ``color=True`` and ``seg=True`` — confirm intended usage.

        Args:
            random_level: accepted but unused in this implementation.
            color: load ``rgb_rawlight.png`` and collect per-point colors.
            normal: accepted but unused (no normals are collected below).
            seg: load ``semantic.png`` and collect per-point labels.

        Returns:
            dict with keys ``coords`` (N,3; millimeters), ``colors``
            (N,3; scaled to [0,1]) and ``segs`` (N,3; label replicated
            across three channels).
        """
        coords = []
        colors = []
        segmentations = []
        normals = []  # never filled below; kept for parity with the flags
        points = {}

        for i in range(len(self.depth_paths)):
            print(i)

            W, H = (1280, 720)
            # Depth PNG stores millimeters; convert to meters.
            depth_img = cv2.imread(self.depth_paths[i], cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR) / 1000.
            # Clamp implausibly small depths; the mask is reused later to tag
            # those pixels with the "invalid" semantic label.
            inv_depth_mask = depth_img < .2
            depth_img[inv_depth_mask] = .2

            camera_pose = np.loadtxt(self.pose_paths[i])
            rot, trans, K = parse_camera_info(camera_pose, H, W, inverse=True)

            # Camera-to-world transform; translation converted mm -> m.
            pose = np.eye(4)
            pose[:3, :3] = rot
            pose[:3, 3] = trans / 1000.
            inv_pose = np.linalg.inv(pose)  # NOTE(review): computed but never used

            # Full pixel grid; overwritten below by the valid-pixel subset.
            xs, ys = np.meshgrid(range(W), range(H), indexing='xy')

            if color:
                rgb_img = cv2.imread(self.rgb_paths[i])
                rgb_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2RGB)

            if seg:
                seg_img = Image.open(self.seg_paths[i])
                # Quantize the RGB semantic image to palette indices (class ids).
                seg_labels = np.array(seg_img.convert(mode="P", palette=create_color_palette()))

            def seg_grad(seg1):
                # Boolean mask of pixels whose label differs from a 2-pixel
                # horizontal or vertical neighbour (label boundaries).
                dx = np.abs(seg1[:, 2:] - seg1[:, :-2])
                dy = np.abs(seg1[2:, :] - seg1[:-2, :])

                grad = np.zeros_like(seg1)
                grad[:, 1:-1] = dx
                grad[1:-1, :] = np.maximum(grad[1:-1, :], dy)

                grad = grad != 0
                return grad

            def depth_grad(depth1):
                # Boolean mask of strong central-difference depth edges (> 0.1 m).
                dx = np.abs(depth1[:, 2:] - depth1[:, :-2])
                dy = np.abs(depth1[2:, :] - depth1[:-2, :])

                grad = np.zeros_like(depth1)
                grad[:, 1:-1] = dx
                grad[1:-1, :] = np.maximum(grad[1:-1, :], dy)

                grad = np.abs(grad) > 0.1
                return grad

            # Drop pixels lying on BOTH a depth edge and a label edge —
            # typical bleeding artifacts along object silhouettes.
            grad_mask = np.logical_and(depth_grad(depth_img), seg_grad(seg_labels))

            valid_mask = np.argwhere(grad_mask == 0)

            ys = valid_mask[:, 0]
            xs = valid_mask[:, 1]

            # Pixels clamped above get the reserved "invalid" label 38.
            seg_labels[inv_depth_mask] = 38
            # Replicate each label into 3 channels so it can be stored like a color.
            seg_labels = np.tile(np.round(seg_labels)[ys, xs].reshape(-1, 1), reps=[1, 3])

            zs = depth_img[ys, xs]
            xs = xs.reshape(1, -1)
            ys = ys.reshape(1, -1)
            zs = zs.reshape(1, -1)

            inverse_K = np.linalg.inv(K)

            # Back-project: per-pixel ray direction, scaled so the point's
            # Euclidean distance along the ray equals the stored depth.
            xyz = (inverse_K[:3, :3].dot(np.concatenate([xs, ys, np.ones_like(xs)], axis=0)))
            xyz = zs * (xyz / np.linalg.norm(xyz, axis=0, ord=2))

            # Transform this view's points into the world frame via open3d.
            xyz_o3d = open3d.geometry.PointCloud()
            xyz_o3d.points = open3d.utility.Vector3dVector(xyz.T)
            xyz_o3d.transform(pose)
            xyz_global = np.asarray(xyz_o3d.points)

            segmentations += list(seg_labels)
            colors += list(rgb_img[ys, xs].reshape(-1,3))
            coords += list(xyz_global)

        points['coords'] = np.asarray(coords) * 1000.  # back to millimeters
        points['colors'] = np.asarray(colors) / 255.0
        points['segs'] = np.asarray(segmentations)

        return points
|
|
|
def get_point_cloud(self): |
|
return self.point_cloud |
|
|
|
def display_inlier_outlier(self, cloud, ind): |
|
inlier_cloud = cloud.select_down_sample(ind) |
|
|
|
|
|
print("Showing outliers (red) and inliers (gray): ") |
|
|
|
|
|
|
|
return inlier_cloud |
|
|
|
def visualize(self, o3d_pcd=None): |
|
|
|
print("Visualizing...") |
|
pcd = open3d.geometry.PointCloud() |
|
|
|
if o3d_pcd is None: |
|
pcd.points = open3d.utility.Vector3dVector(self.point_cloud['coords']) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pcd.colors = open3d.utility.Vector3dVector(self.point_cloud['segs'] / 255.) |
|
else: |
|
pcd = o3d_pcd |
|
|
|
vis = open3d.visualization.Visualizer() |
|
|
|
vis.create_window(window_name="O3D", width=1280, height=720, left=0, top=0, |
|
visible=True) |
|
|
|
|
|
|
|
vis.add_geometry(pcd) |
|
|
|
|
|
with open("/media/sinisa/Sinisa_hdd_data/Sinisa_Projects/corridor_localisation/Datasets/Structured_3D_dataset/Structured3D/Structured3D_0/Structured3D/train/scene_00015/annotation_3d.json") as file: |
|
annos = json.load(file) |
|
|
|
wireframe_geo_list = visualize_wireframe(annos, vis=False, ret=True) |
|
|
|
vis.add_geometry(wireframe_geo_list[0]) |
|
vis.add_geometry(wireframe_geo_list[1]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
vis.run() |
|
vis.destroy_window() |
|
|
|
def generate_density(self, width=256, height=256): |
|
|
|
ps = self.point_cloud["coords"] |
|
|
|
unique_coords, unique_ind = np.unique(np.round(ps / 10) * 10., return_index=True, axis=0) |
|
|
|
ps = unique_coords |
|
|
|
|
|
image_res = np.array((width, height)) |
|
|
|
max_coords = np.max(ps, axis=0) |
|
min_coords = np.min(ps, axis=0) |
|
max_m_min = max_coords - min_coords |
|
|
|
max_coords = max_coords + 0.1 * max_m_min |
|
min_coords = min_coords - 0.1 * max_m_min |
|
|
|
|
|
|
|
coordinates = \ |
|
np.round( |
|
(ps[:, :2] - min_coords[None, :2]) / (max_coords[None,:2] - min_coords[None, :2]) * image_res[None]) |
|
coordinates = np.minimum(np.maximum(coordinates, np.zeros_like(image_res)), |
|
image_res - 1) |
|
|
|
density = np.zeros((height, width), dtype=np.float32) |
|
|
|
unique_coordinates, counts = np.unique(coordinates, return_counts=True, axis=0) |
|
print(np.unique(counts)) |
|
|
|
|
|
unique_coordinates = unique_coordinates.astype(np.int32) |
|
|
|
density[unique_coordinates[:, 1], unique_coordinates[:, 0]] = counts |
|
density = density / np.max(density) |
|
|
|
|
|
plt.figure() |
|
plt.imshow(density) |
|
plt.show() |
|
|
|
return density |
|
|
|
def subsample_pcd(self, seg=False): |
|
|
|
pcd = open3d.geometry.PointCloud() |
|
pcd.points = open3d.utility.Vector3dVector(self.point_cloud['coords']) |
|
|
|
|
|
|
|
if seg: |
|
pcd.colors = open3d.utility.Vector3dVector(self.point_cloud['segs'] / 255.) |
|
else: |
|
pcd.colors = open3d.utility.Vector3dVector(self.point_cloud['colors']) |
|
|
|
final_pcd = pcd |
|
final_pcd, inds = pcd.remove_statistical_outlier(nb_neighbors=10, |
|
std_ratio=3.0) |
|
|
|
final_pcd = final_pcd.uniform_down_sample(every_k_points=10) |
|
return final_pcd |
|
|
|
def export_ply_from_o3d_pcd(self, path, pcd, seg=False): |
|
''' |
|
ply |
|
format ascii 1.0 |
|
comment Mars model by Paul Bourke |
|
element vertex 259200 |
|
property float x |
|
property float y |
|
property float z |
|
property uchar r |
|
property uchar g |
|
property uchar b |
|
property float nx |
|
property float ny |
|
property float nz |
|
end_header |
|
''' |
|
|
|
coords = np.asarray(pcd.points) |
|
colors = (np.asarray(pcd.colors) * 255).astype(np.int32) |
|
with open(path, "w") as f: |
|
f.write("ply\n") |
|
f.write("format ascii 1.0\n") |
|
f.write("element vertex %d\n" % coords.shape[0]) |
|
f.write("property float x\n") |
|
f.write("property float y\n") |
|
f.write("property float z\n") |
|
if self.generate_color: |
|
f.write("property uchar red\n") |
|
f.write("property uchar green\n") |
|
f.write("property uchar blue\n") |
|
|
|
f.write("end_header\n") |
|
for i in range(coords.shape[0]): |
|
coord = coords[i].tolist() |
|
color = colors[i].tolist() |
|
data = coord + color |
|
f.write(" ".join(list(map(str,data)))+'\n') |
|
|