yuandong513 committed
Commit
d5bdc55
1 Parent(s): 88a95b5

Update download script

depth_warp_example.py ADDED
@@ -0,0 +1,424 @@
+ # -*- coding: utf-8 -*-
+ import sys
+ sys.path.append('./')
+
+ import os
+ # must be set before cv2 is imported, otherwise OpenCV's EXR reader stays disabled
+ os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
+
+ import json
+ import cv2
+ import numpy as np
+ import torch
+ from ctypes import CDLL, c_void_p, c_int
+
+ # compiled z-buffer helper; build ./lib first so that ./lib/build/zbuff.so exists
+ quick_zbuff = CDLL("./lib/build/zbuff.so")
+
+
+ def get_coordinate_xy(coord_shape, device):
+     """Get meshgrid coordinates of x and y; each returned tensor is (B, H, W)."""
+     bs, height, width = coord_shape
+     y_coord, x_coord = torch.meshgrid(
+         torch.arange(0, height, dtype=torch.float32, device=device),
+         torch.arange(0, width, dtype=torch.float32, device=device),
+         indexing="ij")
+     y_coord, x_coord = y_coord.contiguous(), x_coord.contiguous()
+     y_coord = y_coord.unsqueeze(0).repeat(bs, 1, 1)
+     x_coord = x_coord.unsqueeze(0).repeat(bs, 1, 1)
+     return x_coord, y_coord
+
+
+ def read_dnormal(normald_path, cond_pos):
+     """Read the depth channel of a normal+depth EXR; zero out values in front of the near plane."""
+     cond_cam_dis = np.linalg.norm(cond_pos, 2)
+
+     near = 0.867  # ~ sqrt(3) * 0.5
+     near_distance = cond_cam_dis - near
+
+     normald = cv2.imread(normald_path, cv2.IMREAD_UNCHANGED).astype(np.float32)
+     depth = normald[..., 3:]
+     depth[depth < near_distance] = 0
+     return depth
+
+
+ def get_intr(target_im):
+     h, w = target_im.shape[:2]
+
+     fx = fy = 1422.222  # focal length at the raw 1024x1024 render resolution
+     res_raw = 1024
+     f_x = f_y = fx * h / res_raw  # rescale focal to the current resolution
+     K = torch.tensor([f_x, 0, w / 2, 0, f_y, h / 2, 0, 0, 1]).reshape(3, 3)
+     return K
+
+
+ def convert_pose(C2W):
+     """Flip the camera y and z axes (OpenGL-style pose to OpenCV-style pose)."""
+     flip_yz = np.eye(4)
+     flip_yz[1, 1] = -1
+     flip_yz[2, 2] = -1
+     C2W = np.matmul(C2W, flip_yz)
+     return torch.from_numpy(C2W)
+
+
+ def read_camera_matrix_single(json_file):
+     with open(json_file, 'r', encoding='utf8') as reader:
+         json_content = json.load(reader)
+
+     # NOTE: unlike the unity2blender experiments, y and z are negated here.
+     camera_matrix = np.eye(4)
+     camera_matrix[:3, 0] = np.array(json_content['x'])
+     camera_matrix[:3, 1] = -np.array(json_content['y'])
+     camera_matrix[:3, 2] = -np.array(json_content['z'])
+     camera_matrix[:3, 3] = np.array(json_content['origin'])
+     return camera_matrix
+
+
+ def read_w2c(camera):
+     """Invert a c2w matrix; returns (w2c, camera distance from the origin)."""
+     tm = np.asarray(camera)
+
+     cam_pos = tm[:3, 3:]
+     world2cam = np.zeros_like(tm)
+     world2cam[:3, :3] = tm[:3, :3].transpose()
+     world2cam[:3, 3:] = -tm[:3, :3].transpose() @ tm[:3, 3:]
+     world2cam[-1, -1] = 1
+
+     return world2cam, np.linalg.norm(cam_pos, 2, axis=0)
+
+
+ def get_camera_pos(camera):
+     tm = np.asarray(camera['transform_matrix'])
+     return tm[:3, 3:]
+
+
+ def to_torch_tensor(input):
+     if isinstance(input, np.ndarray):
+         input = torch.from_numpy(input)
+     return input
+
+
+ def image_warping_v1(target_img, ref_img, K, c2w_t, c2w_r, target_depth, ref_depth,
+                      scale_factor=1.0, device=torch.device("cpu"), save_root=None):
+     # normalize input images to [-1, 1]
+     target_img = target_img.astype(np.float32) / 255. * 2. - 1
+     ref_img = ref_img.astype(np.float32) / 255. * 2. - 1
+
+     with torch.no_grad():
+         ref_K = K
+
+         # target_img: [H, W, 3], target_depth: [H, W, 1], K: [3, 3]
+         t_img = to_torch_tensor(target_img).permute(2, 0, 1).unsqueeze(0).float().to(device)  # [1, 3, H, W]
+         r_img = to_torch_tensor(ref_img).permute(2, 0, 1).unsqueeze(0).float().to(device)  # [1, 3, H, W]
+
+         c2w_t = to_torch_tensor(c2w_t).unsqueeze(0).float().to(device)
+         c2w_r = to_torch_tensor(c2w_r).unsqueeze(0).float().to(device)
+         target_depth = to_torch_tensor(target_depth).permute(2, 0, 1).float().to(device)  # [1, H, W]
+         ref_depth = to_torch_tensor(ref_depth).permute(2, 0, 1).float().to(device)  # [1, H, W]
+
+         K = to_torch_tensor(K).unsqueeze(0).float().to(device)  # [1, 3, 3]
+         ref_K = to_torch_tensor(ref_K).unsqueeze(0).float().to(device)  # [1, 3, 3]
+
+         t_pose = {"intr": K, "extr": torch.inverse(c2w_t)}
+         r_pose = {"intr": K, "extr": torch.inverse(c2w_r)}
+
+         ref_img_warped, ref_depth_warped = image_warping_reproj(depth_ref=ref_depth,
+                                                                 depth_src=None,
+                                                                 ref_pose=r_pose,
+                                                                 src_pose=t_pose,
+                                                                 img_ref=r_img)
+
+     # only used for debugging
+     if save_root is not None:
+         os.makedirs(save_root, exist_ok=True)
+
+         img_w = ref_img_warped[0].permute(1, 2, 0).detach().cpu().numpy()
+         t_img = t_img[0].permute(1, 2, 0).detach().cpu().numpy()
+         r_img = r_img[0].permute(1, 2, 0).detach().cpu().numpy()
+
+         img_blend = 0.5 * t_img + 0.5 * img_w
+
+         save_name = os.path.join(save_root, "blend.jpg")
+         img_vis = np.hstack([t_img, img_w, r_img, img_blend, 0.5 * t_img + 0.5 * r_img])
+
+         cv2.imwrite(save_name, np.clip((img_vis + 1) / 2 * 255, 0, 255).astype(np.uint8)[:, :, (2, 1, 0)])
+
+     return ref_img_warped
+
+
+ def zbuff_check(xyz, bs, height, width):
+     """Resolve occlusions: for each target pixel keep only the projected point with the smallest depth."""
+     p_xyz, depth = xyz[:, :2] / (xyz[:, 2:3].clamp(min=1e-10)), xyz[:, 2:3]
+     x_src = p_xyz[:, 0].view([bs, 1, -1]).round()  # [B, 1, N]
+     y_src = p_xyz[:, 1].view([bs, 1, -1]).round()
+
+     valid_mask_0 = torch.logical_and(x_src < width, y_src < height).view(-1)
+     valid_mask_1 = torch.logical_and(x_src >= 0, y_src >= 0).view(-1)
+     valid_mask = torch.logical_and(valid_mask_0, valid_mask_1)
+
+     x_src = x_src.clamp(0, width - 1).long()
+     y_src = y_src.clamp(0, height - 1).long()
+
+     buffs = -torch.ones((height, width)).to(xyz)    # index buffer
+     z_buffs = -torch.ones((height, width)).to(xyz)  # depth buffer
+
+     src_x = x_src.view(-1).numpy().astype(np.int32)
+     src_y = y_src.view(-1).numpy().astype(np.int32)
+     depth = depth.view(-1).numpy().astype(np.float32)
+     data_size = c_int(src_x.shape[0])
+     valid_mask = valid_mask.numpy()
+
+     buffs = buffs.numpy().astype(np.float32)
+     z_buffs = z_buffs.numpy().astype(np.float32)
+
+     h, w = z_buffs.shape
+
+     # C++ implementation; the pure-Python equivalent is:
+     #
+     # for idx, (x, y, z) in enumerate(zip(src_x, src_y, depth)):
+     #     if not valid_mask[idx]:
+     #         continue
+     #     if buffs[y, x] == -1 or z_buffs[y, x] > z:
+     #         buffs[y, x] = idx
+     #         z_buffs[y, x] = z
+     quick_zbuff.zbuff_check(src_x.ctypes.data_as(c_void_p), src_y.ctypes.data_as(c_void_p),
+                             depth.ctypes.data_as(c_void_p), data_size, valid_mask.ctypes.data_as(c_void_p),
+                             buffs.ctypes.data_as(c_void_p), z_buffs.ctypes.data_as(c_void_p), h, w)
+
+     valid_buffs = torch.from_numpy(buffs[buffs != -1])
+     return valid_buffs.long()
+
+
+ def reproject_with_depth_batch(depth_ref, depth_src, ref_pose, src_pose, xy_coords, img_ref):
+     """Project the reference view into the source view using the reference depth."""
+     # img_ref: [B, 3, H, W], depth: [B, H, W], extr: w2c
+     img_tgt = -torch.ones_like(img_ref)
+     depth_tgt = 5 * torch.ones_like(img_ref)  # background is set to the far plane, 5
+
+     intrinsics_ref, extrinsics_ref = ref_pose["intr"], ref_pose["extr"]
+     intrinsics_src, extrinsics_src = src_pose["intr"], src_pose["extr"]
+
+     bs, height, width = depth_ref.shape[:3]
+
+     ## step 1: project reference pixels into the source view
+     x_ref, y_ref = xy_coords  # (B, H, W)
+     x_ref, y_ref = x_ref.view([bs, 1, -1]), y_ref.view([bs, 1, -1])  # (B, 1, H*W)
+     ref_indx = (y_ref * width + x_ref).long().squeeze()  # row-major flat index
+
+     # drop background pixels (depth == far plane)
+     depth_mask = torch.logical_not((depth_ref.view([bs, 1, -1]))[..., ref_indx] == 5.)[0, 0]
+     x_ref = x_ref[..., depth_mask]
+     y_ref = y_ref[..., depth_mask]
+
+     depth_ref = depth_ref.view(bs, 1, -1)
+     depth_ref = depth_ref[..., depth_mask]
+
+     # lift reference pixels into 3D in the reference camera frame
+     xyz_ref = torch.matmul(torch.inverse(intrinsics_ref),
+                            torch.cat([x_ref, y_ref, torch.ones_like(x_ref)], dim=1) * depth_ref.view([bs, 1, -1]))  # (B, 3, N)
+     # transform into the source camera frame
+     xyz_src = torch.matmul(torch.matmul(extrinsics_src, torch.inverse(extrinsics_ref)),
+                            torch.cat([xyz_ref, torch.ones_like(x_ref)], dim=1))[:, :3]
+
+     # project into the source image and keep only the nearest point per pixel
+     k_xyz_src = torch.matmul(intrinsics_src, xyz_src)
+     zbuff_idx = zbuff_check(k_xyz_src, bs, height, width)
+     x_ref = x_ref[..., zbuff_idx]
+     y_ref = y_ref[..., zbuff_idx]
+     depth_ref = depth_ref[..., zbuff_idx]
+     k_xyz_src = k_xyz_src[..., zbuff_idx]
+     xy_src = k_xyz_src[:, :2] / (k_xyz_src[:, 2:3].clamp(min=1e-10))  # (B, 2, N)
+     src_depth = k_xyz_src[:, 2:3]
+
+     x_src = xy_src[:, 0].view([bs, 1, -1]).round()
+     y_src = xy_src[:, 1].view([bs, 1, -1]).round()
+     x_src = x_src.clamp(0, width - 1).long()
+     y_src = y_src.clamp(0, height - 1).long()
+
+     img_tgt_tmp = img_tgt.permute(0, 2, 3, 1)  # [B, H, W, 3]
+     depth_tgt_tmp = depth_tgt.permute(0, 2, 3, 1)[..., 0]  # [B, H, W]
+     img_ref_tmp = img_ref.permute(0, 2, 3, 1)  # [B, H, W, 3]
+
+     bs_tensor = torch.zeros_like(x_ref).long()  # batch size is 1 here
+     x_ref = x_ref.long()
+     y_ref = y_ref.long()
+
+     # scatter reference colors and depths into the target view
+     img_tgt_tmp[bs_tensor, y_src, x_src] = img_ref_tmp[bs_tensor, y_ref, x_ref]
+     img_tgt = img_tgt_tmp.permute(0, 3, 1, 2)
+
+     depth_tgt_tmp[bs_tensor, y_src, x_src] = src_depth
+     depth_tgt = depth_tgt_tmp.unsqueeze(1)
+
+     return img_tgt, depth_tgt
+
+
+ def image_warping_reproj(depth_ref, depth_src, ref_pose, src_pose,
+                          img_ref, mask_ref=None,
+                          thres_p_dist=15, thres_d_diff=0.1, device=torch.device("cpu"), bg_color=1.0):
+     """Warp img_ref into the src view using the reference-view depth.
+
+     depth_ref: reference-view depth, [B, H, W, 1]
+     """
+     # img_ref: [B, 3, H, W], depth: [B, H, W], extr: w2c, mask_ref: [B, H, W]
+     x_ref, y_ref = get_coordinate_xy(depth_ref.shape, device=device)  # (B, H, W)
+     xy_coords = x_ref, y_ref
+
+     img_ref_warped, depth_ref_warped = \
+         reproject_with_depth_batch(depth_ref, depth_src, ref_pose, src_pose, xy_coords, img_ref)
+
+     return img_ref_warped, depth_ref_warped
+
+
+ def warp(img_list, normald_list, json_list, cond_idx, target_idx):
+     cond_img = img_list[cond_idx]
+     target_img = img_list[target_idx]
+
+     # condition-view parameters
+     cond_camera_path = json_list[cond_idx]
+     cond_view_c2w = read_camera_matrix_single(cond_camera_path)
+
+     cond_view_pos = cond_view_c2w[:3, 3:]
+     cond_world_view_depth = read_dnormal(normald_list[cond_idx], cond_view_pos)
+     cond_world_view_depth = torch.from_numpy(cond_world_view_depth)
+     # background is mapped to the far plane, e.g. 5
+     cond_world_view_depth[cond_world_view_depth == 0] = 5.
+
+     cond_img = cv2.imread(cond_img)
+
+     # target-view parameters
+     target_camera_path = json_list[target_idx]
+     target_view_c2w = read_camera_matrix_single(target_camera_path)
+     target_view_pos = target_view_c2w[:3, 3:]
+
+     target_world_view_depth = read_dnormal(normald_list[target_idx], target_view_pos)
+     target_world_view_depth = torch.from_numpy(target_world_view_depth)
+     target_world_view_depth[target_world_view_depth == 0] = 5.
+
+     K = get_intr(cond_world_view_depth)  # fixed intrinsics from our blender setup
+     target_img = cv2.imread(target_img)
+
+     cond_img_warped = image_warping_v1(target_img, cond_img,
+                                        K,
+                                        convert_pose(target_view_c2w),
+                                        convert_pose(cond_view_c2w),
+                                        target_world_view_depth,
+                                        cond_world_view_depth,
+                                        scale_factor=1.0, device=torch.device("cpu"),
+                                        save_root='./depth_warping_exps')
+     return cond_img_warped
+
+
+ if __name__ == '__main__':
+     img_handler = './campos_512_v4/{:05d}/{:05d}.png'
+     normald_handler = './campos_512_v4/{:05d}/{:05d}_nd.exr'
+     json_handler = './campos_512_v4/{:05d}/{:05d}.json'
+     img_list = [img_handler.format(i, i) for i in range(40)]
+     normald_list = [normald_handler.format(i, i) for i in range(40)]
+     json_list = [json_handler.format(i, i) for i in range(40)]
+
+     warp(img_list, normald_list, json_list, int(sys.argv[1]), int(sys.argv[2]))
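Usage note: the `__main__` block above expects one object's 40 rendered views under `./campos_512_v4/`. As a minimal sketch (assuming the data has been downloaded with the scripts below and `zbuff.so` has been built), the script can also be driven from Python directly; the view indices here are arbitrary examples:

# minimal driver sketch; equivalent shell invocation: python depth_warp_example.py 0 25
from depth_warp_example import warp

cond_idx, target_idx = 0, 25  # warp view 0 (condition) into the camera of view 25 (target)
img_list = ['./campos_512_v4/{:05d}/{:05d}.png'.format(i, i) for i in range(40)]
normald_list = ['./campos_512_v4/{:05d}/{:05d}_nd.exr'.format(i, i) for i in range(40)]
json_list = ['./campos_512_v4/{:05d}/{:05d}.json'.format(i, i) for i in range(40)]

# writes a side-by-side visualization to ./depth_warping_exps/blend.jpg
warp(img_list, normald_list, json_list, cond_idx, target_idx)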
download_gobjaverse_280k.py ADDED
@@ -0,0 +1,42 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+ # e.g.: python /home/joseph/richdreamer/dataset/gobjaverse/download_gobjaverse_280k.py /mnt/Storage/Datasets/gobjaverse_280k gobjaverse_280k.json 16
+
+ import os, sys, json
+ from multiprocessing import Pool
+
+ def download_url(item):
+     end = 40  # hard-coded: 40 views per object
+     copy_items = ['.json', '.png', '_albedo.png', '_hdr.exr', '_mr.png', '_nd.exr', '_ng.exr']  # hard-coded
+     global save_dir
+     oss_base_dir = os.path.join("https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/objaverse", item, "campos_512_v4")
+     for index in range(end):
+         index = "{:05d}".format(index)
+         for copy_item in copy_items:
+             postfix = index + "/" + index + copy_item
+             oss_full_dir = os.path.join(oss_base_dir, postfix)
+             local_path = os.path.join(save_dir, item, index + "/")
+             basename = os.path.basename(oss_full_dir)
+             print("local_path", local_path)
+             print("remote url", oss_full_dir)
+             mkdir_command = "mkdir -p {}".format(local_path)
+             os.system(mkdir_command)
+             if os.path.exists(os.path.join(local_path, basename)):
+                 print("existing, skipping")
+                 continue
+             # download to a .tmp file (resumable via -C -), then rename, so an
+             # interrupted run never leaves a truncated file under the final name
+             curl_command = "curl -o {} -C - {}".format(os.path.join(local_path, basename + '.tmp'), oss_full_dir)
+             print(curl_command)
+             os.system(curl_command)
+             mv_command = "mv {} {}".format(os.path.join(local_path, basename + '.tmp'), os.path.join(local_path, basename))
+             print(mv_command)
+             os.system(mv_command)
+
+ if __name__ == "__main__":
+     assert len(sys.argv) == 4, "e.g.: python ./scripts/data/download_gobjaverse_280k.py ./gobjaverse_280k ./gobjaverse_280k.json 10"
+     save_dir = str(sys.argv[1])
+     json_file = str(sys.argv[2])
+     n_threads = int(sys.argv[3])
+
+     data = json.load(open(json_file))
+     p = Pool(n_threads)  # despite the name, these are worker processes
+     p.map(download_url, data)
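Usage note: the script maps `download_url` over the entries of the manifest JSON, and `download_url` joins each entry directly into the OSS URL, so the manifest must be a flat JSON list of relative object paths. A minimal sketch of that layout (the two entries are hypothetical examples; use the real gobjaverse_280k.json):

# sketch of the expected manifest: a flat JSON list of "<group>/<object_id>" strings
import json

items = ["0/10010", "0/10011"]  # hypothetical entries for illustration
with open("gobjaverse_280k.json", "w") as f:
    json.dump(items, f)
# then: python download_gobjaverse_280k.py ./gobjaverse_280k ./gobjaverse_280k.json 16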
download_objaverse_280k_tar.py ADDED
@@ -0,0 +1,19 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+
+ import os, sys, json
+ from multiprocessing import Pool
+
+ def download_url(item):
+     global save_dir
+     oss_full_dir = os.path.join("https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/objaverse_tar", item + ".tar")
+     # each item looks like "<group>/<object_id>"; keep the group directory locally
+     os.system("wget -P {} {}".format(os.path.join(save_dir, item.split("/")[0]), oss_full_dir))
+
+ if __name__ == "__main__":
+     assert len(sys.argv) == 4, "e.g.: python download_objaverse.py ./data /path/to/json_file 10"
+     save_dir = str(sys.argv[1])
+     json_file = str(sys.argv[2])
+     n_threads = int(sys.argv[3])
+
+     data = json.load(open(json_file))
+     p = Pool(n_threads)
+     p.map(download_url, data)
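Usage note: this variant fetches one tar per object instead of ~280 individual files, so the archives still need unpacking afterwards. A minimal sketch, assuming the `<save_dir>/<group>/<object_id>.tar` layout produced above (not part of the original script):

# sketch: unpack every downloaded tar next to where it was saved
import glob, os, tarfile

save_dir = "./data"  # same first argument as the download script
for tar_path in glob.glob(os.path.join(save_dir, "*", "*.tar")):
    with tarfile.open(tar_path) as tf:
        tf.extractall(os.path.dirname(tar_path))  # archives are assumed trusted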
process_blender_dataset.py ADDED
@@ -0,0 +1,77 @@
+ # -*- coding: utf-8 -*-
+
+ import glob
+ import cv2
+ import json
+ import numpy as np
+ import os
+
+
+ normal_list = sorted(glob.glob('./blender_data/*_normal.png'))
+ camera_list = sorted(glob.glob('./blender_data/*.json'))
+
+
+ def blender2midas(img):
+     '''Blender: rub
+     midas: lub
+     '''
+     img[..., 0] = -img[..., 0]
+     img[..., 1] = -img[..., 1]
+     img[..., -1] = -img[..., -1]
+     return img
+
+
+ def read_camera_matrix_single(json_file):
+     with open(json_file, 'r', encoding='utf8') as reader:
+         json_content = json.load(reader)
+
+     # assumed to be the correct convention for this data;
+     # NOTE: unlike depth_warp_example.py, x/y/z are used without sign flips here
+     camera_matrix = np.eye(4)
+     camera_matrix[:3, 0] = np.array(json_content['x'])
+     camera_matrix[:3, 1] = np.array(json_content['y'])
+     camera_matrix[:3, 2] = np.array(json_content['z'])
+     camera_matrix[:3, 3] = np.array(json_content['origin'])
+
+     return camera_matrix
+
+
+ os.makedirs('./blender_system', exist_ok=True)
+
+
+ for idx, (normal_path, camera_json) in enumerate(zip(normal_list, camera_list)):
+     normal = cv2.imread(normal_path)
+     # BGR -> RGB so channels line up with xyz
+     normal = normal[..., ::-1]
+     world_normal = (normal.astype(np.float32) / 255. * 2.) - 1
+
+     cond_c2w = read_camera_matrix_single(camera_json)
+     # rotate world-space normals into the camera frame, then into the midas convention
+     view_cn = blender2midas(world_normal @ (cond_c2w[:3, :3]))
+
+     view_cn = (view_cn + 1.) / 2. * 255
+     view_cn = np.asarray(np.clip(view_cn, 0, 255), np.uint8)
+     z_dir = view_cn[..., -1]
+     mask = z_dir < 127  # normals facing the camera end up with z < 127 after remapping
+     view_cn = view_cn[..., ::-1]  # back to BGR for cv2.imwrite
+
+     visual_mask = view_cn * mask[..., None]
+
+     cv2.imwrite(os.path.join("./blender_system/", "{:04d}.png".format(idx)), view_cn)
+     cv2.imwrite(os.path.join("./blender_system/", "visual_mask_{:04d}.png".format(idx)), visual_mask)
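To make the axis handling concrete, here is how a single world-space normal moves through the loop above; a minimal sketch with a made-up normal and an identity camera rotation (the remap mirrors `blender2midas`, it is not the script itself):

# sketch: one normal through the world -> camera -> midas remap
import numpy as np

n_world = np.array([[0., 0., 1.]])           # hypothetical normal along blender +z
R = np.eye(3)                                 # identity camera rotation, for illustration
n_cam = n_world @ R                           # world -> camera frame
n_midas = n_cam * np.array([-1., -1., -1.])   # same sign flips as blender2midas
pixel = ((n_midas + 1.) / 2. * 255).astype(np.uint8)
print(pixel)  # [[127 127   0]] -> z channel < 127, so this pixel passes the mask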
process_unity_dataset.py ADDED
@@ -0,0 +1,92 @@
+ # -*- coding: utf-8 -*-
+
+ import json
+ import numpy as np
+ import os
+ # must be set before cv2 is imported, otherwise OpenCV's EXR reader stays disabled
+ os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
+ import cv2
+
+ os.makedirs("./normal_visualized/", exist_ok=True)
+ os.makedirs("./unity_system/", exist_ok=True)
+
+ normal_handler = './campos_512_v4/{:05d}/{:05d}_nd.exr'
+ json_handler = './campos_512_v4/{:05d}/{:05d}.json'
+ normal_list = [normal_handler.format(i, i) for i in range(40)]
+ json_list = [json_handler.format(i, i) for i in range(40)]
+
+ def read_camera_matrix_single(json_file):
+     with open(json_file, 'r', encoding='utf8') as reader:
+         json_content = json.load(reader)
+
+     # assumed to be the correct convention for this data;
+     # NOTE: unlike depth_warp_example.py, x/y/z are used without sign flips here
+     camera_matrix = np.eye(4)
+     camera_matrix[:3, 0] = np.array(json_content['x'])
+     camera_matrix[:3, 1] = np.array(json_content['y'])
+     camera_matrix[:3, 2] = np.array(json_content['z'])
+     camera_matrix[:3, 3] = np.array(json_content['origin'])
+
+     return camera_matrix
+
+
+ def unity2blender(normal):
+     normal_clone = normal.copy()
+     normal_clone[..., 0] = -normal[..., -1]
+     normal_clone[..., 1] = -normal[..., 0]
+     normal_clone[..., 2] = normal[..., 1]
+     return normal_clone
+
+
+ def blender2midas(img):
+     '''Blender: rub
+     midas: lub
+     '''
+     img[..., 0] = -img[..., 0]
+     img[..., 1] = -img[..., 1]
+     img[..., -1] = -img[..., -1]
+     return img
+
+
+ for normal in normal_list:
+     assert os.path.exists(normal), normal
+ for json_path in json_list:
+     assert os.path.exists(json_path), json_path
+
+
+ for idx, (normal_path, camera_json) in enumerate(zip(normal_list, json_list)):
+     normald = cv2.imread(normal_path, cv2.IMREAD_UNCHANGED).astype(np.float32)
+     normal = normald[..., :3]
+     normal_norm = np.linalg.norm(normal, 2, axis=-1, keepdims=True)
+     # the depth channel has some problems, so renormalize the normal part
+     normal = normal / normal_norm
+     normal = np.nan_to_num(normal, nan=-1.)
+
+     # unity -> blender axis convention
+     world_normal = unity2blender(normal)
+
+     cond_c2w = read_camera_matrix_single(camera_json)
+     view_cn = blender2midas(world_normal @ (cond_c2w[:3, :3]))
+     view_cn = (view_cn + 1.) / 2. * 255
+     view_cn = np.asarray(np.clip(view_cn, 0, 255), np.uint8)
+
+     z_dir = view_cn[..., -1]
+     mask = z_dir < 127
+
+     view_cn = view_cn[..., ::-1]  # RGB -> BGR for cv2.imwrite
+     visual_mask = view_cn * mask[..., None]
+
+     cv2.imwrite(os.path.join("./unity_system/", "{:04d}.png".format(idx)), view_cn)
+     cv2.imwrite(os.path.join("./unity_system/", "visual_mask_{:04d}.png".format(idx)), visual_mask)
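Sanity-check note: `unity2blender` is a pure axis permutation with sign flips, so it must preserve vector norms; a minimal self-contained sketch to verify that property (not part of the pipeline above):

# sketch: unity2blender should map unit normals to unit normals
import numpy as np

def unity2blender(normal):
    normal_clone = normal.copy()
    normal_clone[..., 0] = -normal[..., -1]
    normal_clone[..., 1] = -normal[..., 0]
    normal_clone[..., 2] = normal[..., 1]
    return normal_clone

n = np.random.randn(8, 3)
n /= np.linalg.norm(n, axis=-1, keepdims=True)  # random unit normals
out = unity2blender(n)
assert np.allclose(np.linalg.norm(out, axis=-1), 1.0)  # norms preserved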