# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
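"""Sample surface point clouds from generated 3D shapes.

The core pipeline renders per-view depth maps with nvdiffrast from cameras
placed on a sphere, then back-projects sampled depth pixels into world space
to form a surface point cloud. The active __main__ block instead loads
Gaussian-splat / mesh outputs (LGM or splatter-image style) and exports
fixed-size point clouds via farthest point sampling from pytorch3d.
"""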
import json
import math
import numpy as np
import os
import argparse
import multiprocessing as mp
from multiprocessing import Pool
import trimesh
import tqdm
import torch
import nvdiffrast.torch as dr
import kaolin as kal  # only referenced by commented-out code paths below
import glob  # only referenced by commented-out code paths below
import ipdb  # debugging only; all ipdb.set_trace() calls below are commented out
import pytorch3d.ops
parser = argparse.ArgumentParser(description='sample surface points from mesh')
parser.add_argument(
    '--n_proc', type=int, default=8,
    help='Number of processes to run in parallel '
         '(0 means sequential execution).')
parser.add_argument(
    '--n_points', type=int, default=5000,
    help='Number of points to sample per model.')
parser.add_argument(
    '--n_views', type=int, default=100,
    help='Number of views per model.')
parser.add_argument(
    '--image_height', type=int, default=640,
    help='Depth image height.')
parser.add_argument(
    '--image_width', type=int, default=640,
    help='Depth image width.')
parser.add_argument(
    '--focal_length_x', type=float, default=640,
    help='Focal length in x direction.')
parser.add_argument(
    '--focal_length_y', type=float, default=640,
    help='Focal length in y direction.')
parser.add_argument(
    '--principal_point_x', type=float, default=320,
    help='Principal point location in x direction.')
parser.add_argument(
    '--principal_point_y', type=float, default=320,
    help='Principal point location in y direction.')
parser.add_argument("--shape_root", type=str, default='/mnt/petrelfs/caoziang/3D_generation/Checkpoint_all/diffusion_shapenet_testmodel27_omni_ablation2/ddpm_5000/test', help="path to the generated shapes to sample points from")
parser.add_argument("--save_root", type=str, default='/mnt/petrelfs/caoziang/3D_generation/Checkpoint_all/diffusion_shapenet_testmodel27_omni_ablation2/ddpm_vis_ab2surface', help="path where the sampled surface point clouds are saved")
options = parser.parse_args()
# create array for inverse mapping
coordspx2 = np.stack(np.nonzero(np.ones((options.image_height, options.image_width))), -1).astype(np.float32)
coordspx2 = coordspx2[:, ::-1]
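# np.nonzero yields (row, col) = (y, x) pairs; the flip above reorders each
# entry to (x, y) so it lines up with the intrinsics matrix below.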
fusion_intrinsics = np.array(
    [
        [options.focal_length_x, 0, options.principal_point_x],
        [0, options.focal_length_y, options.principal_point_y],
        [0, 0, 1]
    ])
# glctx = dr.RasterizeGLContext()  # EGL/egl.h: No such file or directory
glctx = dr.RasterizeCudaContext()
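# Converts a non-linear depth-buffer value in [0, 1] back to linear eye-space
# depth for the near/far planes below (depth=0 -> zNear, depth=1 -> zFar).
# Note: this helper is not called by the active pipeline, which interpolates
# camera-space z directly in render_nvdiffrast instead.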
def CalcLinearZ(depth):
    # depth = depth * 2 - 1
    zFar = 100.0
    zNear = 0.1
    linear = zNear / (zFar - depth * (zFar - zNear)) * zFar
    return linear
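# Builds an OpenGL-style clip-space projection matrix from OpenCV pinhole
# intrinsics (fx, fy, cx, cy), with n and f as the near/far clipping planes.
# The negated entries pair with the axis-flip matrix applied in
# render_nvdiffrast below to bridge the OpenCV and OpenGL camera conventions.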
def projection_cv_new(fx, fy, cx, cy, width, height, n=1.0, f=50.0):
    return np.array(
        [[-2 * fx / width, 0.0, (width - 2 * cx) / width, 0.0],
         [0.0, -2 * fy / height, (height - 2 * cy) / height, 0.0],
         [0.0, 0.0, (-f - n) / (f - n), -2.0 * f * n / (f - n)],
         [0.0, 0.0, -1.0, 0.0]])
def interpolate(attr, rast, attr_idx, rast_db=None):
    return dr.interpolate(
        attr.contiguous(), rast, attr_idx, rast_db=rast_db,
        diff_attrs=None if rast_db is None else 'all')
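# Rasterizes the mesh from each camera in T_bx4x4 (world-to-camera poses) and
# returns per-pixel absolute camera-space depth, one map per view. Pixels not
# covered by any triangle stay 0, which the fusion step uses as a validity mask.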
def render_nvdiffrast(v_pos, tris, T_bx4x4):
    # T_bx4x4 - world to cam
    proj = projection_cv_new(
        fx=options.focal_length_x, fy=options.focal_length_y, cx=options.principal_point_x,
        cy=options.principal_point_y,
        width=options.image_width, height=options.image_height, n=0.1, f=100.0)
    fix = torch.eye(4, dtype=torch.float32, device='cuda')
    fix[2, 2] = -1
    fix[1, 1] = -1
    fix[0, 0] = -1
    fix = fix.unsqueeze(0).repeat(T_bx4x4.shape[0], 1, 1)
    proj = torch.tensor(proj, dtype=torch.float32, device='cuda').unsqueeze(0).repeat(T_bx4x4.shape[0], 1, 1)
    T_world_cam_bx4x4 = torch.bmm(fix, T_bx4x4)
    mvp = torch.bmm(proj, T_world_cam_bx4x4)
    v_pos_clip = torch.matmul(
        torch.nn.functional.pad(v_pos, pad=(0, 1), mode='constant', value=1.0),
        torch.transpose(mvp, 1, 2))
    # v_pos_clip is already a float32 CUDA tensor; no torch.tensor() re-wrap needed
    rast, db = dr.rasterize(
        glctx, v_pos_clip, tris.int(),
        (options.image_height, options.image_width))
    v_pos_cam = torch.matmul(
        torch.nn.functional.pad(v_pos, pad=(0, 1), mode='constant', value=1.0),
        torch.transpose(T_world_cam_bx4x4, 1, 2))
    gb_pos_cam, _ = interpolate(v_pos_cam, rast, tris.int())
    depth_maps = gb_pos_cam[..., 2].abs()
    return depth_maps
def as_mesh(scene_or_mesh):
    """
    Convert a possible scene to a mesh.
    If conversion occurs, the returned mesh has only vertex and face data.
    """
    if isinstance(scene_or_mesh, trimesh.Scene):
        if len(scene_or_mesh.geometry) == 0:
            mesh = None  # empty scene
        else:
            # we lose texture information here
            mesh = trimesh.util.concatenate(
                tuple(
                    trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
                    for g in scene_or_mesh.geometry.values()))
    else:
        assert isinstance(scene_or_mesh, trimesh.Trimesh)
        mesh = scene_or_mesh
    return mesh
def render(mesh_v, mesh_f, Rs):
    """
    Render depth maps of the given mesh from the generated views.
    :param mesh_v: mesh vertices
    :type mesh_v: torch.Tensor
    :param mesh_f: mesh faces
    :type mesh_f: torch.Tensor
    :param Rs: rotation matrices
    :type Rs: [numpy.ndarray]
    :return: depth maps
    :rtype: torch.Tensor
    """
    # world-to-camera poses: rotation Rs, camera translated 1 unit along +z
    T_bx4x4 = torch.zeros((options.n_views, 4, 4), dtype=torch.float32, device='cuda')
    T_bx4x4[:, 3, 3] = 1
    T_bx4x4[:, 2, 3] = 1
    T_bx4x4[:, :3, :3] = torch.tensor(np.array(Rs), dtype=torch.float32, device='cuda')
    depthmaps = render_nvdiffrast(
        mesh_v,
        mesh_f, T_bx4x4)
    return depthmaps
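# Places options.n_views points quasi-uniformly on the unit sphere with the
# Fibonacci (golden-angle) spiral: y sweeps [-1, 1] in equal steps while the
# azimuth advances by the golden angle pi * (3 - sqrt(5)) per point.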
def get_points():
    """
    Sample options.n_views points on the unit sphere.
    :return: array of points
    :rtype: numpy.ndarray
    """
    rnd = 1.
    points = []
    offset = 2. / options.n_views
    increment = math.pi * (3. - math.sqrt(5.))  # golden angle in radians
    for i in range(options.n_views):
        y = ((i * offset) - 1) + (offset / 2)
        r = math.sqrt(1 - pow(y, 2))
        phi = ((i + rnd) % options.n_views) * increment
        x = math.cos(phi) * r
        z = math.sin(phi) * r
        points.append([x, y, z])
    return np.array(points)
def get_views(semi_sphere=False):
    """
    Generate a set of view rotation matrices, one per sampled sphere point.
    :param semi_sphere: restrict views to the negative-z hemisphere
    :type semi_sphere: bool
    :return: rotation matrices
    :rtype: [numpy.ndarray]
    """
    Rs = []
    points = get_points()
    if semi_sphere:
        points[:, 2] = -np.abs(points[:, 2]) - 0.1
    for i in range(points.shape[0]):
        # convert each view direction to longitude/latitude, then compose the
        # corresponding rotations about the y and x axes
        longitude = -math.atan2(points[i, 0], points[i, 1])
        latitude = math.atan2(points[i, 2], math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))
        R_x = np.array(
            [[1, 0, 0],
             [0, math.cos(latitude), -math.sin(latitude)],
             [0, math.sin(latitude), math.cos(latitude)]])
        R_y = np.array(
            [[math.cos(longitude), 0, math.sin(longitude)],
             [0, 1, 0],
             [-math.sin(longitude), 0, math.cos(longitude)]])
        R = R_x @ R_y
        Rs.append(R)
    return Rs
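# Back-projects sampled depth pixels into a single world-space point cloud.
# For a pixel (u, v) with depth d, the camera-space point is
#   x_cam = d * K^-1 @ [u, v, 1]^T,
# and the world-space point is x_world = T^-1 @ [x_cam, 1]^T, where K is the
# intrinsics matrix and T the world-to-camera pose of that view.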
def fusion(depthmaps, Rs):
    """
    Fuse the rendered depth maps into a point cloud.
    :param depthmaps: depth maps
    :type depthmaps: torch.Tensor
    :param Rs: rotation matrices corresponding to views
    :type Rs: [numpy.ndarray]
    :return: fused world-space points
    :rtype: torch.Tensor
    """
    # sample points inside mask
    sample_per_view = options.n_points // options.n_views
    sample_bxn = torch.zeros((options.n_views, sample_per_view), device='cuda', dtype=torch.long)
    for i in range(len(Rs)):
        mask = depthmaps[i] > 0
        valid_idx = torch.nonzero(mask.reshape(-1)).squeeze(-1)
        # assumes every view sees at least sample_per_view foreground pixels;
        # the assignment below fails otherwise
        idx = list(range(valid_idx.shape[0]))
        np.random.shuffle(idx)
        idx = idx[:sample_per_view]
        sample_bxn[i] = valid_idx[idx]  # already a tensor; no torch.tensor() wrap needed
    depthmaps = torch.gather(depthmaps.reshape(options.n_views, -1), 1, sample_bxn)
    inv_Ks_bx3x3 = torch.tensor(np.linalg.inv(fusion_intrinsics), dtype=torch.float32, device='cuda').unsqueeze(
        0).repeat(options.n_views, 1, 1)
    T_bx4x4 = torch.zeros((options.n_views, 4, 4), dtype=torch.float32, device='cuda')
    T_bx4x4[:, 3, 3] = 1
    T_bx4x4[:, 2, 3] = 1
    T_bx4x4[:, :3, :3] = torch.tensor(np.array(Rs), dtype=torch.float32, device='cuda')
    inv_T_bx4x4 = torch.inverse(T_bx4x4)  # camera-to-world
    tf_coords_bxpx2 = torch.tensor(coordspx2.copy(), dtype=torch.float32, device='cuda').unsqueeze(0).repeat(
        options.n_views, 1, 1)
    tf_coords_bxpx2 = torch.gather(tf_coords_bxpx2, 1, sample_bxn.unsqueeze(-1).repeat(1, 1, 2))
    tf_coords_bxpx3 = torch.cat([tf_coords_bxpx2, torch.ones_like(tf_coords_bxpx2[..., :1])], -1)
    tf_coords_bxpx3 *= depthmaps.reshape(options.n_views, -1, 1)
    tf_cam_bxpx3 = torch.bmm(inv_Ks_bx3x3, tf_coords_bxpx3.transpose(1, 2)).transpose(1, 2)
    tf_cam_bxpx4 = torch.cat([tf_cam_bxpx3, torch.ones_like(tf_cam_bxpx3[..., :1])], -1)
    tf_world_bxpx3 = torch.bmm(inv_T_bx4x4, tf_cam_bxpx4.transpose(1, 2)).transpose(1, 2)[..., :3]
    return tf_world_bxpx3.reshape(-1, 3)
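# Rescales the mesh so its largest bounding-box extent equals
# normalized_scale; rotate_x optionally applies a small rotation about the
# x axis to reconcile coordinate conventions between datasets.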
def normalize(vertices, faces, normalized_scale=0.9, rotate_x=False):
    vertices = vertices.cuda()
    if rotate_x:  # rotate about the x axis to match the two coordinate conventions
        rot_mat = torch.eye(n=3, device='cuda')
        theta = np.pi / 90  # 2 degrees; np.pi / 2 would be a full 90-degree rotation
        rot_mat[1, 1] = np.cos(theta)
        rot_mat[2, 2] = np.cos(theta)
        rot_mat[1, 2] = -np.sin(theta)
        rot_mat[2, 1] = np.sin(theta)
        vertices = rot_mat @ vertices.transpose(0, 1)
        vertices = vertices.transpose(0, 1)
    scale = (vertices.max(dim=0)[0] - vertices.min(dim=0)[0]).max()
    mesh_v1 = vertices / scale * normalized_scale
    mesh_f1 = faces.long().cuda()
    return mesh_v1, mesh_f1
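# Worker entry point: loads one mesh, renders depth maps from the sphere
# views, and fuses them into a surface point cloud exported at output_pth.
# Exceptions are printed and swallowed so one bad mesh doesn't kill the pool.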
def sample_surface_pts(path):
    try:
        mesh_path, output_pth, debug = path
        # mesh = kal.io.obj.import_mesh(mesh_path)
        mesh = as_mesh(trimesh.load(mesh_path))  # collapse a possible Scene to a single mesh
        if mesh is None or mesh.vertices.shape[0] == 0:
            return
        mesh_v = torch.Tensor(mesh.vertices)
        mesh_v, mesh_f = normalize(mesh_v, torch.Tensor(mesh.faces), normalized_scale=0.9, rotate_x=True)
        # generate camera matrices
        # Rs = get_views(semi_sphere=True)
        Rs = get_views(semi_sphere=False)
        # get depth images
        depths = render(mesh_v, mesh_f, Rs)
        # project to world space
        try:
            pcd = fusion(depths, Rs)
        except Exception:
            return
        pcd = pcd.cpu().numpy()
        # np.savez(output_pth, pcd=pcd)
        # if debug:
        pcd = trimesh.points.PointCloud(pcd)
        pcd.export(output_pth.replace('.npz', '.obj'))
    except Exception as e:
        print(e, flush=True)
if __name__ == '__main__':
    mp.set_start_method('spawn')
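    # 'spawn' is required so each worker process gets its own CUDA context;
    # the default 'fork' start method does not play well with CUDA.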
    shapenet_root = options.shape_root
    save_root = options.save_root
    debug = True
    # model_list = sorted(os.listdir(shapenet_root))[:7500]
    # model_list = glob.glob(os.path.join(shapenet_root, '*.obj'))
    # os.makedirs(save_root, exist_ok=True)
    # cmds = [(os.path.join(shapenet_root, id.split('/')[-1]), os.path.join(save_root, id.split('/')[-1]), debug) for id in model_list]
    # cmds = [(os.path.join(shapenet_root, id.split('/')[-1]), os.path.join(save_root, 'pcd_4096.ply'), debug) for id in model_list]
    # cmds += [(os.path.join(shapenet_root, id.split('/')[-1]), os.path.join(save_root, 'test.obj'), debug) for id in model_list]
    objv_dataset = '/mnt/sfs-common/yslan/Dataset/Obajverse/chunk-jpeg-normal/bs_16_fixsave3/170K/512/'
    dataset_json = os.path.join(objv_dataset, 'dataset.json')
    with open(dataset_json, 'r') as f:
        dataset_json = json.load(f)
    # all_objs = dataset_json['Animals'][::3][:6250]
    all_objs = dataset_json['Animals'][::3][1100:2200]
    all_objs = all_objs[:600]
    cmds = []
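    # NOTE: cmds is only populated by the commented-out blocks below, so in
    # this configuration the multiprocessing pool at the bottom iterates over
    # an empty list and the FPS export loop below does all the work.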
    # for instance_name in os.listdir(shapenet_root)[:]:
    #     cmds += [(os.path.join(shapenet_root, instance_name), os.path.join(save_root, f'{instance_name.split(".")[0]}_pcd_4096.ply'), debug)]
    # ! for gt
    # for obj_folder in sorted(os.listdir(shapenet_root)):
    #     cmds += [(os.path.join(shapenet_root, obj_folder, 'meshes/model.obj'), os.path.join(save_root, f'{obj_folder}_pcd_4096.ply'), debug)]
    # ! for baseline samples
    os.makedirs(save_root, exist_ok=True)
    # ! free3d
    # for obj_folder in tqdm.tqdm(sorted(os.listdir(shapenet_root))):
    #     if not os.path.isdir(os.path.join(shapenet_root, obj_folder)):
    #         continue
    #     if 'LGM' in shapenet_root:
    #         gs_path = os.path.join(shapenet_root, obj_folder, '0gaussian.ply')
    #     else:  # splatter-img
    #         gs_path = os.path.join(shapenet_root, obj_folder, '0/mesh.ply')
    #     pcd = trimesh.load(gs_path).vertices
    #     fps_pcd, fps_idx = pytorch3d.ops.sample_farthest_points(
    #         # torch.from_numpy(pcd).unsqueeze(0).cuda(), K=4096,
    #         torch.from_numpy(pcd).unsqueeze(0).cuda(), K=4000,
    #         random_start_point=True)
    #     # assert fps_pcd.shape[1] == 4096
    #     pcd = trimesh.points.PointCloud(fps_pcd[0].cpu().numpy())
    #     output_path = os.path.join(save_root, f'{obj_folder}_pcd_4096.ply')
    #     pcd.export(output_path.replace('.npz', '.obj'))
    # objv
    # for obj_folder in tqdm.tqdm(sorted(os.listdir(all_objs))):
    for obj_folder in tqdm.tqdm(all_objs):
        if not os.path.isdir(os.path.join(shapenet_root, obj_folder)):
            continue
        save_name = '-'.join(obj_folder.split('/'))
        if 'LGM' in shapenet_root:
            gs_path = os.path.join(shapenet_root, obj_folder, '0gaussian.ply')
        else:  # splatter-img
            gs_path = os.path.join(shapenet_root, obj_folder, '0/mesh.ply')
        pcd = trimesh.load(gs_path).vertices  # (N, 3) vertex positions
        # farthest point sampling down to a fixed-size point cloud;
        # np.asarray strips trimesh's TrackedArray subclass before torch sees it
        fps_pcd, fps_idx = pytorch3d.ops.sample_farthest_points(
            # torch.from_numpy(np.asarray(pcd)).unsqueeze(0).cuda(), K=4096,
            torch.from_numpy(np.asarray(pcd)).unsqueeze(0).cuda(), K=4000,
            random_start_point=True)
        # assert fps_pcd.shape[1] == 4096
        pcd = trimesh.points.PointCloud(fps_pcd[0].cpu().numpy())
        output_path = os.path.join(save_root, f'{save_name}_pcd_4096.ply')
        pcd.export(output_path)  # path already ends in .ply; no suffix rewrite needed
    # ! lgm
    # for idx in [0]:
    #     for i in range(10):
    #         img = os.path.join(shapenet_root, obj_folder, str(idx), f'{i}.jpg')
    #         img = os.path.join(path, obj_folder, str(idx), f'sample-0-{i}.jpg')
    #         files.append(img)
    #     if 'CRM' in shapenet_root:
    #         mesh_path = glob.glob(os.path.join(shapenet_root, obj_folder, f'{idx}', '*.obj'))[0]
    #     else:
    #         if os.path.exists(os.path.join(shapenet_root, obj_folder, f'{idx}/mesh.obj')):
    #             mesh_path = os.path.join(shapenet_root, obj_folder, f'{idx}/mesh.obj')
    #         else:
    #             mesh_path = os.path.join(shapenet_root, obj_folder, f'{idx}/mesh.ply')
    #     cmds += [(mesh_path, os.path.join(save_root, f'{obj_folder}_pcd_4096.ply'), debug)]
    if options.n_proc == 0:
        # sequential execution; each cmd is a (mesh_path, output_pth, debug) tuple
        for cmd in tqdm.tqdm(cmds):
            sample_surface_pts(cmd)
    else:
        with Pool(options.n_proc) as p:
            list(tqdm.tqdm(p.imap(sample_surface_pts, cmds), total=len(cmds)))
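# Example invocation (script name and paths are placeholders), running the
# FPS export loop and any populated cmds sequentially:
#   python sample_surface.py --n_proc 0 \
#       --shape_root /path/to/generated_shapes \
#       --save_root /path/to/output_pcds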