import os
import os.path as osp
import pathlib
import open3d as o3d
import numpy as np
import pandas as pd
import zarr
import pickle
import tqdm
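# Expected zarr hierarchy (inferred from the accesses below; actual stores may
# contain additional arrays):
#   <zarr_path>/samples/<group_key>/
#       point_cloud/{point, rgb, nocs, sizes[, cls]}
#       mesh/{cloth_verts, cloth_nocs_verts, cloth_faces_tri}
#       grip_vertex_id/{left_grip_vertex_id, right_grip_vertex_id}
#       hand_pose/{left_hand_pos, right_hand_pos, left_hand_euler, right_hand_euler}
#   with per-sample attrs including: video_id, instance_id, scale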
# helper functions
# ================
def _get_groups_df(samples_group):
    rows = dict()
    for key, group in samples_group.items():
        rows[key] = group.attrs.asdict()
    groups_df = pd.DataFrame(data=list(rows.values()), index=rows.keys())
    groups_df.drop_duplicates(inplace=True)
    groups_df['group_key'] = groups_df.index
    return groups_df
class VRFoldingDatasetExample:
    def __init__(self,
                 # zarr
                 zarr_path: str,
                 num_pc_sample: int = 4000,
                 num_views: int = 4,
                 only_foreground_pc: bool = True,
                 vis=False,
                 # catch all
                 **kwargs):
        """
        simple dataset class to handle data in .zarr format
        """
        super().__init__()
        path = pathlib.Path(os.path.expanduser(zarr_path))
        assert path.exists()
        self.zarr_path = str(path.absolute())
        root = zarr.open(self.zarr_path, mode='r')
        samples_group = root['samples']
        # extract common info from sample group
        _, sample_group = next(samples_group.groups())
        print(sample_group.tree())
        # load group metadata
        groups_df = _get_groups_df(samples_group)
        # check if index is sorted
        assert groups_df.index.is_monotonic_increasing
        groups_df['idx'] = np.arange(len(groups_df))
        # global state
        self.samples_group = samples_group
        self.groups_df = groups_df
        # params
        self.num_pc_sample = num_pc_sample
        self.num_views = num_views
        self.only_foreground_pc = only_foreground_pc
        self.vis = vis
        video_info_path = self.zarr_path
        # find all video sequences
        self.find_video_idxs(video_info_path)
        # find all valid grip intervals
        self.find_valid_grip_intervals(video_info_path)
    def find_video_idxs(self, video_seq_cache_dir: str):
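        """Build video-level indices.

        Populates ``self.video_to_idxs_dict`` (video_id -> list of sample indices) and
        ``self.idx_to_video_list`` (sample index -> video_id). The result is cached as
        ``video_seq.pkl`` inside the zarr directory so subsequent runs can skip the scan.
        """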
        os.makedirs(video_seq_cache_dir, exist_ok=True)
        cache_path = os.path.join(video_seq_cache_dir, 'video_seq.pkl')
        if os.path.exists(cache_path):
            print('Loading video sequences cache in {}'.format(cache_path))
            with open(cache_path, 'rb') as f:
                self.video_to_idxs_dict, self.idx_to_video_list = pickle.load(f)
        else:
            data_length = self.__len__()
            self.video_to_idxs_dict = dict()
            self.idx_to_video_list = []
            print('Finding video sequences...')
            for idx in tqdm.tqdm(range(data_length), ncols=0):
                dataset_idx = idx
                row = self.groups_df.iloc[dataset_idx]
                group = self.samples_group[row.group_key]
                attrs = group.attrs.asdict()
                video_id = attrs['video_id']
                if video_id not in self.video_to_idxs_dict:
                    self.video_to_idxs_dict[video_id] = []
                self.video_to_idxs_dict[video_id].append(idx)
                self.idx_to_video_list.append(video_id)
            print('Finish finding video sequences!')
            with open(cache_path, 'wb') as f:
                pickle.dump((self.video_to_idxs_dict, self.idx_to_video_list), f)
            print('Saving video sequences cache to {}'.format(cache_path))
    def find_valid_grip_intervals(self, video_seq_cache_dir: str):
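        """Find contiguous frame intervals in which at least one hand grasps the garment.

        An interval starts when either hand has a valid grip (first grip vertex id != -1)
        and ends when both hands release or the video ends. Populates
        ``self.interval_to_idxs_dict`` (interval id -> sample indices) and
        ``self.idx_to_interval_list`` (sample index -> interval id, or -1 for frames
        outside any interval). The result is cached as ``video_grip_interval_v2.pkl``.
        """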
        os.makedirs(video_seq_cache_dir, exist_ok=True)

        def is_valid_grip(grip_vertex_ids):
            return grip_vertex_ids[0] != -1

        cache_path = os.path.join(video_seq_cache_dir, 'video_grip_interval_v2.pkl')
        if os.path.exists(cache_path):
            print('Loading video grip interval cache in {}'.format(cache_path))
            with open(cache_path, 'rb') as f:
                self.interval_to_idxs_dict, self.idx_to_interval_list = pickle.load(f)
        else:
            data_length = self.__len__()
            self.interval_to_idxs_dict = dict()
            self.idx_to_interval_list = []
            assert self.video_to_idxs_dict is not None
            print('Finding video valid grip intervals...')
            in_interval = False
            interval_count = 0
            for idx in tqdm.tqdm(range(data_length), ncols=0):
                dataset_idx = idx
                row = self.groups_df.iloc[dataset_idx]
                group = self.samples_group[row.group_key]
                attrs = group.attrs.asdict()
                video_id = attrs['video_id']
                grip_point_group = group['grip_vertex_id']
                left_grip_vertex_ids = grip_point_group['left_grip_vertex_id'][:]
                right_grip_vertex_ids = grip_point_group['right_grip_vertex_id'][:]
                if not in_interval and (is_valid_grip(left_grip_vertex_ids) or is_valid_grip(right_grip_vertex_ids)):
                    # interval starts when either hand grasps the garment
                    self.interval_to_idxs_dict[interval_count] = []
                    in_interval = True
                if in_interval:
                    self.interval_to_idxs_dict[interval_count].append(idx)
                    self.idx_to_interval_list.append(interval_count)
                else:
                    self.idx_to_interval_list.append(-1)
                if (in_interval and not is_valid_grip(left_grip_vertex_ids) and not is_valid_grip(right_grip_vertex_ids)) \
                        or self.video_to_idxs_dict[video_id][-1] == idx:
                    # interval ends when both hands release or the video ends
                    in_interval = False
                    interval_count += 1
            print('Finish finding {} valid grip intervals!'.format(interval_count))
            with open(cache_path, 'wb') as f:
                pickle.dump((self.interval_to_idxs_dict, self.idx_to_interval_list), f)
            print('Saving grip interval cache to {}'.format(cache_path))
    def __len__(self):
        return len(self.groups_df)
    def data_io(self, idx: int) -> dict:
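        """Read one raw sample (complete mesh, partial point cloud, grip vertex ids and hand poses) from the zarr store."""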
        dataset_idx = idx
        row = self.groups_df.iloc[dataset_idx]
        group = self.samples_group[row.group_key]
        # io
        attrs = group.attrs.asdict()
        instance_id = attrs['instance_id']
        scale = attrs['scale']
        pc_group = group['point_cloud']
        mesh_group = group['mesh']
        grip_point_group = group['grip_vertex_id']
        hand_pose_group = group['hand_pose']
        if 'cls' in pc_group:
            pc_cls = pc_group['cls'][:]
            pc_cls[pc_cls > 0] = 1  # only two classes (foreground + background)
        else:
            pc_cls = np.zeros(pc_group['point'][:].shape[0]).astype(np.uint8)
        data = {
            'cloth_sim_verts': mesh_group['cloth_verts'][:],  # complete mesh vertices in task space
            'cloth_nocs_verts': mesh_group['cloth_nocs_verts'][:],  # complete mesh vertices in NOCS space
            'cloth_faces_tri': mesh_group['cloth_faces_tri'][:],  # mesh face triangles
            'pc_nocs': pc_group['nocs'][:],  # NOCS coordinates of input partial point cloud
            'pc_sim': pc_group['point'][:],  # XYZ of input partial point cloud
            'pc_sim_rgb': pc_group['rgb'][:],  # RGB of input partial point cloud
            'pc_sizes': pc_group['sizes'][:],  # per-view number of points in input partial point cloud
            'pc_cls': pc_cls,  # classification label of input partial point cloud
            'left_grip_vertex_ids': grip_point_group['left_grip_vertex_id'][:],  # left-hand grasped mesh vertex ids
            'right_grip_vertex_ids': grip_point_group['right_grip_vertex_id'][:],  # right-hand grasped mesh vertex ids
            'left_hand_pos': hand_pose_group['left_hand_pos'][:],  # positions of the 25 finger bones of the left hand
            'right_hand_pos': hand_pose_group['right_hand_pos'][:],  # positions of the 25 finger bones of the right hand
            'left_hand_euler': hand_pose_group['left_hand_euler'][:],  # Euler angles of the 25 finger bones of the left hand
            'right_hand_euler': hand_pose_group['right_hand_euler'][:],  # Euler angles of the 25 finger bones of the right hand
            'video_id': attrs['video_id'],  # video id
            'scale': scale
        }
        return data
    def get_base_data(self, idx: int, seed: int, data_in: dict) -> dict:
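        """Convert a raw sample into model-ready arrays.

        Optionally keeps only foreground points, selects ``num_views`` views and exactly
        ``num_pc_sample`` points with a fixed random seed, and computes the mean grasp
        point of each hand in both task (sim) space and NOCS space. If ``self.vis`` is
        True, the point cloud, mesh and grasp points are also shown with Open3D.
        """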
        num_pc_sample = self.num_pc_sample
        num_views = self.num_views
        if self.only_foreground_pc:
            foreground_idxs = data_in['pc_cls'] == 0
            if data_in['pc_cls'].shape[0] != data_in['pc_sim_rgb'].shape[0]:
                foreground_idxs = np.arange(data_in['pc_sim_rgb'].shape[0])
            data_in['pc_sim_rgb'] = data_in['pc_sim_rgb'][foreground_idxs]
            data_in['pc_sim'] = data_in['pc_sim'][foreground_idxs]
            data_in['pc_nocs'] = data_in['pc_nocs'][foreground_idxs]
            data_in['pc_cls'] = data_in['pc_cls'][foreground_idxs]
        rs = np.random.RandomState(seed=seed)
        all_idxs = np.arange(len(data_in['pc_sim']))
        all_num_views = len(data_in['pc_sizes'])
        if num_views < all_num_views:
            # randomly select a subset of camera views and keep only their points
            idxs_mask = np.zeros_like(all_idxs, dtype=bool)
            selected_view_idxs = np.sort(rs.choice(all_num_views, size=num_views, replace=False))
            view_idxs = np.concatenate([[0], np.cumsum(data_in['pc_sizes'])])
            for i in selected_view_idxs:
                idxs_mask[view_idxs[i]: view_idxs[i + 1]] = True
            all_idxs = all_idxs[idxs_mask]
        if all_idxs.shape[0] >= num_pc_sample:
            selected_idxs = rs.choice(all_idxs, size=num_pc_sample, replace=False)
        else:
            # not enough points: shuffle, then pad by repeating points up to num_pc_sample
            np.random.seed(seed)
            np.random.shuffle(all_idxs)
            res_num = num_pc_sample - len(all_idxs)
            selected_idxs = np.concatenate([all_idxs, all_idxs[:res_num]], axis=0)
        pc_sim_rgb = data_in['pc_sim_rgb'][selected_idxs].astype(np.float32) / 255
        pc_sim = data_in['pc_sim'][selected_idxs].astype(np.float32)
        pc_nocs = data_in['pc_nocs'][selected_idxs].astype(np.float32)
        pc_cls = data_in['pc_cls'][selected_idxs].astype(np.int64)
        pc_nocs[pc_cls != 0, :] = -1.0
        dataset_idx = np.array([idx])
        video_id = np.array([int(data_in['video_id'])])
        scale = np.array([data_in['scale']])
        cloth_sim_verts = data_in['cloth_sim_verts']
        cloth_nocs_verts = data_in['cloth_nocs_verts']
        left_grip_vertex_ids = data_in['left_grip_vertex_ids']
        right_grip_vertex_ids = data_in['right_grip_vertex_ids']
        left_grip_point_sim = np.array([-10., -10., -10.], dtype=np.float32)
        right_grip_point_sim = np.array([-10., -10., -10.], dtype=np.float32)
        left_grip_point_nocs = np.array([-2., -2., -2.], dtype=np.float32)
        right_grip_point_nocs = np.array([-2., -2., -2.], dtype=np.float32)
        is_left_hand_valid_grasp = False
        is_right_hand_valid_grasp = False
        for hand_id, grip_vertex_ids in enumerate((left_grip_vertex_ids, right_grip_vertex_ids)):
            if grip_vertex_ids[0] != -1:
                # valid grasp point on the garment
                grip_vertices_sim = cloth_sim_verts[grip_vertex_ids, :]
                mean_grip_point_sim = np.mean(grip_vertices_sim, axis=0)
                grip_vertices_nocs = cloth_nocs_verts[grip_vertex_ids, :]
                mean_grip_point_nocs = np.mean(grip_vertices_nocs, axis=0)
                if hand_id == 0:
                    left_grip_point_sim = mean_grip_point_sim.astype(np.float32)
                    left_grip_point_nocs = mean_grip_point_nocs.astype(np.float32)
                    is_left_hand_valid_grasp = True
                else:
                    right_grip_point_sim = mean_grip_point_sim.astype(np.float32)
                    right_grip_point_nocs = mean_grip_point_nocs.astype(np.float32)
                    is_right_hand_valid_grasp = True
        data = {
            'x': pc_sim_rgb,  # RGB of input partial point cloud
            'y': pc_nocs,  # NOCS coordinates of input partial point cloud
            'pos': pc_sim,  # XYZ of input partial point cloud
            'cls': pc_cls,  # classification label of input partial point cloud
            'dataset_idx': dataset_idx,  # dataset index
            'video_id': video_id,  # video id
            'left_grip_point_sim': left_grip_point_sim,  # left-hand grasp point in task space
            'left_grip_point_nocs': left_grip_point_nocs,  # left-hand grasp point in NOCS space
            'right_grip_point_sim': right_grip_point_sim,  # right-hand grasp point in task space
            'right_grip_point_nocs': right_grip_point_nocs,  # right-hand grasp point in NOCS space
            'scale': scale
        }
        if self.vis:
            vis_list = []
            pc_rgb_sim_pcd = o3d.geometry.PointCloud()
            pc_rgb_sim_pcd.points = o3d.utility.Vector3dVector(pc_sim)
            pc_rgb_sim_pcd.colors = o3d.utility.Vector3dVector(pc_sim_rgb)
            vis_list.append(pc_rgb_sim_pcd)
            mesh_sim_pcd = o3d.geometry.PointCloud()
            mesh_sim_pcd.points = o3d.utility.Vector3dVector(cloth_sim_verts)
            mesh_sim_pcd.colors = o3d.utility.Vector3dVector(cloth_nocs_verts)
            vis_list.append(mesh_sim_pcd.translate((0.8, 0., 0.)))
            if is_left_hand_valid_grasp:
                left_grasp_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
                left_grasp_sphere.paint_uniform_color([0.9, 0.1, 0.1])  # red
                left_grasp_sphere = left_grasp_sphere.translate(left_grip_point_sim)
                vis_list.append(left_grasp_sphere)
            if is_right_hand_valid_grasp:
                right_grasp_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
                right_grasp_sphere.paint_uniform_color([0.1, 0.1, 0.9])  # blue
                right_grasp_sphere = right_grasp_sphere.translate(right_grip_point_sim)
                vis_list.append(right_grasp_sphere)
            # visualization
            vis = o3d.visualization.Visualizer()
            vis.create_window(window_name='Press q or Esc to quit', width=1640, height=1080)
            for item in vis_list:
                vis.add_geometry(item)
            vis.get_render_option().load_from_json(osp.join(osp.curdir, 'render_option.json'))
            param = o3d.io.read_pinhole_camera_parameters(osp.join(os.getcwd(), 'view_point.json'))
            vis.get_view_control().convert_from_pinhole_camera_parameters(param)
            vis.run()
            param = vis.get_view_control().convert_to_pinhole_camera_parameters()
            o3d.io.write_pinhole_camera_parameters(osp.join(os.getcwd(), 'view_point.json'), param)
            vis.close()
        return data
    def __getitem__(self, idx: int) -> dict:
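        """Return the processed sample at ``idx`` (raw zarr IO followed by sampling)."""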
        raw_data = self.data_io(idx)
        input_data = self.get_base_data(idx, seed=idx, data_in=raw_data)
        return input_data
if __name__ == '__main__':
    os.chdir(osp.dirname(osp.realpath(__file__)))
    zarr_path = osp.join(osp.dirname(osp.realpath(__file__)), 'VR_Folding', 'vr_simulation_folding_dataset_example.zarr', 'Tshirt')
    print(zarr_path)
    dataset = VRFoldingDatasetExample(zarr_path=zarr_path, vis=True)
    for i in range(len(dataset)):
        if dataset.idx_to_interval_list[i] == -1:
            # skip static frame without valid grasp point
            continue
        data = dataset[i]
        video_id = data['video_id']
        print(f'Reading sample {i}, video {video_id}! Press q or Esc on the window to quit visualization of current frame!')
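    # Example (hypothetical usage, not part of the original script): iterate over whole
    # grip intervals instead of single frames, e.g. to visualize one manipulation
    # segment at a time:
    #   for interval_id, idxs in dataset.interval_to_idxs_dict.items():
    #       samples = [dataset[i] for i in idxs]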