hushell committed
Commit f0d013b
Parent: bf952ba

init from Mihai-generated h5 files

.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.hdf5 filter=lfs diff=lfs merge=lfs -text
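This is the standard Git LFS attribute line; running `git lfs track "*.hdf5"` at the repo root would append the same entry. It is what makes the multi-gigabyte label files added below land in LFS storage as small pointer files instead of in the git history itself.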
bbox_dataset.py ADDED
@@ -0,0 +1,166 @@
+""" GraspNet dataset processing.
+"""
+
+import os
+
+import h5py
+import numpy as np
+import scipy.io as scio
+from PIL import Image
+from skimage.measure import label, regionprops
+from torch.utils.data import Dataset
+
+from .data_utils import CameraInfo, transform_point_cloud, create_point_cloud_from_depth_image, \
+    get_workspace_mask, remove_invisible_grasp_points
+
+
+class GraspNetDataset(Dataset):
+    def __init__(self, root, valid_obj_idxs, camera='kinect', split='train', remove_invisible=True,
+                 augment=False, limited_data=False, overfitting=False, k_grasps=1,
+                 ground_truth_type="topk", caching=True):
+        self.root = root
+        self.split = split
+        self.remove_invisible = remove_invisible
+        self.valid_obj_idxs = valid_obj_idxs
+        self.camera = camera
+        self.augment = augment
+        self.k_grasps = k_grasps
+        self.ground_truth_type = ground_truth_type
+        self.overfitting = overfitting
+        self.caching = caching
+
+        if overfitting:
+            limited_data = True
+        self.limited_data = limited_data
+
+        if split == 'train':
+            self.sceneIds = list(range(100))
+        elif split == 'test':
+            self.sceneIds = list(range(100, 190))
+        elif split == 'test_seen':
+            self.sceneIds = list(range(100, 130))
+        elif split == 'test_similar':
+            self.sceneIds = list(range(130, 160))
+        elif split == 'test_novel':
+            self.sceneIds = list(range(160, 190))
+        else:
+            raise ValueError('unknown split: {}'.format(split))
+        if limited_data:
+            self.sceneIds = self.sceneIds[:10]
+        self.sceneIds = ['scene_{}'.format(str(x).zfill(4)) for x in self.sceneIds]
+
+        filename = f"dataset/{split}_labels"
+        if limited_data and not overfitting:
+            filename += "_limited"
+        if overfitting:
+            filename += "_overfitting"
+        filename += ".hdf5"
+        self.h5_filename = filename
+        # opened lazily in __getitem__ so every DataLoader worker gets its own handle
+        self.h5_file = None
+        self.grasp_labels_filename = "dataset/grasp_labels.hdf5"
+        self.grasp_labels_file = None
+
+        with h5py.File(self.h5_filename, 'r') as f:
+            self.len = f['depthpath'].shape[0]
+
+    def __len__(self):
+        return self.len
+
+    def __getitem__(self, index):
+        if self.h5_file is None:
+            self.h5_file = h5py.File(self.h5_filename, 'r')
+
+        meta_path = str(self.h5_file['metapath'][index], 'utf-8')
+        # ".../scene_xxxx/<camera>/meta/yyyy.mat" -> annotation id yyyy
+        ann_id = int(meta_path.split("meta")[1][1:-4])
+
+        color = np.array(Image.open(self.h5_file['colorpath'][index]), dtype=np.float32) / 255.0
+        depth = np.array(Image.open(self.h5_file['depthpath'][index]))
+
+        # fix missing depth readings: clamp outliers to the 99th percentile
+        # and fill zero (invalid) pixels with the same value
+        p99 = np.percentile(depth[depth != 0], 99)
+        depth[depth > p99] = p99
+        depth[depth == 0] = p99
+
+        seg = np.array(Image.open(self.h5_file['labelpath'][index]))
+        meta = scio.loadmat(meta_path)
+        scene = self.h5_file['scenename'][index]
+
+        main_path = meta_path.split("meta")[0]
+        cam_extrinsics = np.load(os.path.join(main_path, 'camera_poses.npy'))[ann_id]
+        cam_wrt_table = np.load(os.path.join(main_path, 'cam0_wrt_table.npy'))
+        cam_extrinsics = cam_wrt_table.dot(cam_extrinsics).astype(np.float32)
+
+        try:
+            obj_idxs = meta['cls_indexes'].flatten().astype(np.int32)
+            poses = meta['poses']
+            intrinsic = meta['intrinsic_matrix']
+            factor_depth = meta['factor_depth']
+        except Exception as e:
+            print(repr(e))
+            print(scene)
+            raise
+
+        camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1],
+                            intrinsic[0][2], intrinsic[1][2], factor_depth)
+
+        # generate cloud (required to remove invisible grasp points)
+        # cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)
+
+        obj_bounding_boxes = []
+        for obj_idx in obj_idxs:
+            if obj_idx not in self.valid_obj_idxs:
+                continue
+            if (seg == obj_idx).sum() < 50:
+                continue
+
+            # binary mask of this object, then its connected components
+            seg_cpy = (seg == obj_idx).astype(np.uint8)
+            seg_labels = label(seg_cpy)
+            regions = regionprops(seg_labels)
+
+            # regionprops bbox is (min_row, min_col, max_row, max_col);
+            # store it as normalized (x_min, y_min, x_max, y_max), repeated k_grasps times
+            b = regions[0].bbox
+            H, W = seg.shape[0], seg.shape[1]
+            obj_bounding_boxes.append(
+                np.array([b[1] / W, b[0] / H, b[3] / W, b[2] / H])[None].repeat(self.k_grasps, 0))
+        obj_bounding_boxes = np.concatenate(obj_bounding_boxes, axis=0).astype(np.float32)
+
+        ret_dict = {}
+        # ret_dict['point_cloud'] = cloud.transpose((2, 0, 1)).astype(np.float32)
+        ret_dict['color'] = color.transpose((2, 0, 1)).astype(np.float32)
+        ret_dict['depth'] = (depth / camera.scale).astype(np.float32)
+        ret_dict['objectness_label'] = seg.astype(np.int32)
+        ret_dict['obj_bounding_boxes'] = obj_bounding_boxes
+        ret_dict['camera_intrinsics'] = np.expand_dims(
+            np.concatenate([intrinsic.reshape(-1), factor_depth[0]]), -1).astype(np.float32)
+        ret_dict['camera_extrinsics'] = cam_extrinsics.astype(np.float32)
+        ret_dict['obj_idxs'] = obj_idxs
+
+        return ret_dict
+
+
+def load_valid_obj_idxs():
+    valid_obj_idxs = []
+    for i in range(88):
+        if i == 18:
+            continue
+        valid_obj_idxs.append(i + 1)  # object ids in the label images are 1-based
+
+    return valid_obj_idxs
+
+
+def my_worker_init_fn(worker_id):
+    # give each DataLoader worker a distinct, deterministic numpy seed
+    np.random.seed(np.random.get_state()[1][0] + worker_id)
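A minimal usage sketch for the new dataset class. The module path `dataset.bbox_dataset` and the GraspNet root are assumptions, not part of this commit: the relative import of `data_utils` means the file must sit inside a package, and the image paths stored in the HDF5 tables must resolve locally.

    from torch.utils.data import DataLoader
    from dataset.bbox_dataset import GraspNetDataset, load_valid_obj_idxs, my_worker_init_fn

    valid_obj_idxs = load_valid_obj_idxs()
    train_set = GraspNetDataset(root='/path/to/graspnet', valid_obj_idxs=valid_obj_idxs,
                                camera='kinect', split='train', limited_data=True)
    # batch_size=1 because the number of boxes varies per scene,
    # so the default collate cannot stack them across scenes
    loader = DataLoader(train_set, batch_size=1, shuffle=True, num_workers=2,
                        worker_init_fn=my_worker_init_fn)
    sample = next(iter(loader))
    print(sample['color'].shape)               # torch.Size([1, 3, 720, 1280])
    print(sample['obj_bounding_boxes'].shape)  # (1, num_valid_objects * k_grasps, 4)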
data_utils.py ADDED
@@ -0,0 +1,149 @@
+""" Tools for data processing.
+    Author: chenxi-wang
+"""
+
+import numpy as np
+
+
+class CameraInfo:
+    """ Camera intrinsics for point cloud creation. """
+    def __init__(self, width, height, fx, fy, cx, cy, scale):
+        self.width = width
+        self.height = height
+        self.fx = fx
+        self.fy = fy
+        self.cx = cx
+        self.cy = cy
+        self.scale = scale
+
+
+def create_point_cloud_from_depth_image(depth, camera, organized=True):
+    """ Generate point cloud using depth image only.
+
+        Input:
+            depth: [numpy.ndarray, (H,W), numpy.float32]
+                depth image
+            camera: [CameraInfo]
+                camera intrinsics
+            organized: [bool]
+                whether to keep the cloud in image shape (H,W,3)
+
+        Output:
+            cloud: [numpy.ndarray, (H,W,3)/(H*W,3), numpy.float32]
+                generated cloud, (H,W,3) for organized=True, (H*W,3) for organized=False
+    """
+    assert depth.shape[0] == camera.height and depth.shape[1] == camera.width
+    xmap = np.arange(camera.width)
+    ymap = np.arange(camera.height)
+    xmap, ymap = np.meshgrid(xmap, ymap)
+    # back-project pixels through the pinhole model
+    points_z = depth / camera.scale
+    points_x = (xmap - camera.cx) * points_z / camera.fx
+    points_y = (ymap - camera.cy) * points_z / camera.fy
+    cloud = np.stack([points_x, points_y, points_z], axis=-1)
+    if not organized:
+        cloud = cloud.reshape([-1, 3])
+    return cloud
+
+
+def transform_point_cloud(cloud, transform, format='4x4'):
+    """ Transform points to new coordinates with a transformation matrix.
+
+        Input:
+            cloud: [np.ndarray, (N,3), np.float32]
+                points in original coordinates
+            transform: [np.ndarray, (3,3)/(3,4)/(4,4), np.float32]
+                transformation matrix, rotation only or rotation+translation
+            format: [string, '3x3'/'3x4'/'4x4']
+                the shape of the transformation matrix
+                '3x3' --> rotation matrix
+                '3x4'/'4x4' --> rotation matrix + translation vector
+
+        Output:
+            cloud_transformed: [np.ndarray, (N,3), np.float32]
+                points in new coordinates
+    """
+    if format not in ('3x3', '3x4', '4x4'):
+        raise ValueError("Unknown transformation format, only support '3x3', '3x4' or '4x4'.")
+    if format == '3x3':
+        cloud_transformed = np.dot(transform, cloud.T).T
+    else:
+        # use homogeneous coordinates for '3x4'/'4x4'
+        ones = np.ones(cloud.shape[0])[:, np.newaxis]
+        cloud_ = np.concatenate([cloud, ones], axis=1)
+        cloud_transformed = np.dot(transform, cloud_.T).T
+        cloud_transformed = cloud_transformed[:, :3]
+    return cloud_transformed
+
+
+def compute_point_dists(A, B):
+    """ Compute pair-wise point distances between two point clouds.
+
+        Input:
+            A: [np.ndarray, (N,3), np.float32]
+                point cloud A
+            B: [np.ndarray, (M,3), np.float32]
+                point cloud B
+
+        Output:
+            dists: [np.ndarray, (N,M), np.float32]
+                distance matrix
+    """
+    A = A[:, np.newaxis, :]
+    B = B[np.newaxis, :, :]
+    dists = np.linalg.norm(A - B, axis=-1)
+    return dists
+
+
+def remove_invisible_grasp_points(cloud, grasp_points, pose, th=0.01):
+    """ Remove the invisible part of an object model according to the scene point cloud.
+
+        Input:
+            cloud: [np.ndarray, (N,3), np.float32]
+                scene point cloud
+            grasp_points: [np.ndarray, (M,3), np.float32]
+                grasp point labels in object coordinates
+            pose: [np.ndarray, (4,4), np.float32]
+                transformation matrix from object coordinates to world coordinates
+            th: [float]
+                a grasp point is removed if its minimum distance to the scene points exceeds th
+
+        Output:
+            visible_mask: [np.ndarray, (M,), bool]
+                mask of the visible grasp points
+    """
+    grasp_points_trans = transform_point_cloud(grasp_points, pose)
+    dists = compute_point_dists(grasp_points_trans, cloud)
+    min_dists = dists.min(axis=1)
+    visible_mask = (min_dists < th)
+    return visible_mask
+
+
+def get_workspace_mask(cloud, seg, trans=None, organized=True, outlier=0):
+    """ Keep only the points inside the workspace.
+
+        Input:
+            cloud: [np.ndarray, (H,W,3), np.float32]
+                scene point cloud
+            seg: [np.ndarray, (H,W,), np.uint8]
+                segmentation labels of the scene points
+            trans: [np.ndarray, (4,4), np.float32]
+                transformation matrix for the scene points, default: None
+            organized: [bool]
+                whether the cloud is kept in image shape (H,W,3)
+            outlier: [float]
+                a point is removed if its distance to the workspace exceeds outlier
+
+        Output:
+            workspace_mask: [np.ndarray, (H,W)/(H*W,), bool]
+                mask indicating whether each scene point is inside the workspace
+    """
+    if organized:
+        h, w, _ = cloud.shape
+        cloud = cloud.reshape([h * w, 3])
+        seg = seg.reshape(h * w)
+    if trans is not None:
+        cloud = transform_point_cloud(cloud, trans)
+    # the workspace is the axis-aligned bounding box of the foreground
+    # (segmented) points, padded by `outlier` on every side
+    foreground = cloud[seg > 0]
+    xmin, ymin, zmin = foreground.min(axis=0)
+    xmax, ymax, zmax = foreground.max(axis=0)
+    mask_x = ((cloud[:, 0] > xmin - outlier) & (cloud[:, 0] < xmax + outlier))
+    mask_y = ((cloud[:, 1] > ymin - outlier) & (cloud[:, 1] < ymax + outlier))
+    mask_z = ((cloud[:, 2] > zmin - outlier) & (cloud[:, 2] < zmax + outlier))
+    workspace_mask = (mask_x & mask_y & mask_z)
+    if organized:
+        workspace_mask = workspace_mask.reshape([h, w])
+
+    return workspace_mask
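A small sanity check of the helpers above on synthetic data. The camera parameters are made up for illustration, and the flat `data_utils` import assumes the module is on the Python path:

    import numpy as np
    from data_utils import CameraInfo, create_point_cloud_from_depth_image, transform_point_cloud

    camera = CameraInfo(width=4, height=3, fx=2.0, fy=2.0, cx=2.0, cy=1.5, scale=1000.0)
    depth = np.full((3, 4), 1000, dtype=np.float32)  # a flat surface 1 m from the camera
    cloud = create_point_cloud_from_depth_image(depth, camera, organized=False)  # (12, 3)

    # rotate the cloud 90 degrees around the z axis with a 4x4 rigid transform
    T = np.eye(4, dtype=np.float32)
    T[:2, :2] = [[0.0, -1.0], [1.0, 0.0]]
    cloud_rot = transform_point_cloud(cloud, T, format='4x4')
    assert np.allclose(cloud_rot[:, 2], 1.0)  # depth (z) is unchanged by the rotation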
grasp_labels.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c2cb4478af68123236739f784c430432558bb1984240c810519501b43e5ba73
+size 27649926048
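The three lines above are the whole Git LFS pointer format: a spec version, the SHA-256 of the real content, and its size in bytes (about 27.6 GB here). A hypothetical helper (`read_lfs_pointer` is illustrative, not part of this repo) to inspect a pointer on a checkout where the LFS content has not been pulled:

    def read_lfs_pointer(path):
        # parse the "key value" lines of a Git LFS pointer file
        with open(path) as f:
            return dict(line.strip().split(' ', 1) for line in f if line.strip())

    info = read_lfs_pointer('grasp_labels.hdf5')
    print(info['oid'], int(info['size']))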
test_bbox_dataset.ipynb ADDED
(diff too large to render)
test_labels.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cce93092c5d48ace0ffbc34a800a0c8997cc12fd9161382ea9dca36187d26d0b
+size 13738218528
test_labels_limited.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57f845819b33566b49137e16669fb21a47d193310d9229b9ed94e03707f5948b
+size 1847477632
test_novel_labels.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b6ab6d23ba11440f2c37d895bec769751841551796ffbf19e9a09f8b3e760e4
+size 3820886464
test_novel_labels_limited.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e99e19baddac4480def00e03cc3a5829abce9e06266b4190478f21ef23554399
+size 1221612544
test_seen_labels.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a56ab3f596b08bdcee0b55eb90b52193f0f18463f7aed58e37c7b1b98b0865fa
+size 5767926784
test_seen_labels_limited.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57f845819b33566b49137e16669fb21a47d193310d9229b9ed94e03707f5948b
+size 1847477632
test_seen_labels_overfitting.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4accd843326975e7ba805ee390104c19f671617991c73ec814aaaadca9f0ce36
+size 1845877136
test_similar_labels.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37698bc1d5568b85eccf659cc568796865a50e2b1973f224741cf571324977f5
+size 4149426496
test_similar_labels_limited.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad322ac599799601be38ce79dce25955e33a01641d178cfa1efd2c89977e7d1
+size 1699060928
train_labels.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62a5ff1adba287d590d364ecd38d4d28679d998e1fe56143cdb2d5a3aec9123e
+size 17807294464
train_labels_limited.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3bc086730b39a2c2957d4d029adc30fcb87a3791f6f3968eb1a5a181d447dd3
+size 1623629632
train_labels_overfitting.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0216e80f3596d8050ed4113e7d3176398356ba313b47ceca3f86eed6e51d53d7
+size 1622029136