import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
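# Preprocesses raw ScanNet scans for the ScanNet200 benchmark: each scene mesh
# is axis-aligned, given per-point semantic labels and instance ids from the
# segmentation/aggregation annotations, optionally normalized, voxelized, and
# written out as PLY files under train/, val/, or test/.
#
# Example invocation (script name and paths are illustrative, not from the source):
#   python preprocess_scannet200.py \
#       --dataset_root /data/scannet/scans \
#       --output_root /data/scannet200 \
#       --label_map_file /data/scannet/scannetv2-labels.combined.tsv \
#       --train_val_splits_path /data/scannet/splits \
#       --num_workers 8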
|
|
import sys
import os
import argparse
import glob
import json
from concurrent.futures import ProcessPoolExecutor

import numpy as np
import pandas as pd

from scannet200_constants import *
from scannet200_splits import *
from utils import *
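# The starred imports are assumed to provide the names used below:
# VALID_CLASS_IDS_200 from scannet200_constants, and read_plymesh, save_plymesh,
# point_indices_from_group, and voxelize_pointcloud from utils.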
|
|
# Filename suffixes of the per-scene ScanNet assets.
CLOUD_FILE_PFIX = '_vh_clean_2'
SEGMENTS_FILE_PFIX = '.0.010000.segs.json'
AGGREGATIONS_FILE_PFIX = '.aggregation.json'
CLASS_IDs = VALID_CLASS_IDS_200
|
|
# Per-worker globals, populated once per worker process by init_worker() so
# that handle_process() can use the shared configuration without it being
# re-pickled for every scene.
_OUTPUT_ROOT = ''
_TRAIN_SCENES = set()
_VAL_SCENES = set()
_VOXEL_SIZE = 0.2
_NORMALIZE = False
_LABEL_MAP = {}
|
|
def init_worker(output_root, train_scenes, val_scenes, voxel_size, normalize, label_map):
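    """Stash the shared preprocessing configuration in module-level globals.

    Runs once in every ProcessPoolExecutor worker (via ``initializer=``), so
    the split sets, voxel size, and label map are available to handle_process().
    """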
    global _OUTPUT_ROOT, _TRAIN_SCENES, _VAL_SCENES, _VOXEL_SIZE, _NORMALIZE, _LABEL_MAP
    _OUTPUT_ROOT = output_root
    _TRAIN_SCENES = set(train_scenes)
    _VAL_SCENES = set(val_scenes)
    _VOXEL_SIZE = voxel_size
    _NORMALIZE = normalize
    _LABEL_MAP = label_map
|
|
def normalize_pointcloud(points):
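    """Center a point cloud on its centroid and scale it into the unit sphere.

    Near-degenerate clouds (maximum radius below 1e-8) are only centered, to
    avoid dividing by zero.
    """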
    centered = points - np.mean(points, axis=0, keepdims=True)
    scale = np.max(np.linalg.norm(centered, axis=1))
    if scale < 1e-8:
        return centered
    return centered / scale
|
|
def handle_process(scene_path):
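    """Preprocess one scene folder: align, label, save, and voxelize.

    Writes a full-resolution labelled PLY and a voxelized PLY into the
    train/val/test subfolder that the scene id belongs to.
    """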
|
|
    scene_id = os.path.basename(scene_path)
    mesh_path = os.path.join(scene_path, f'{scene_id}{CLOUD_FILE_PFIX}.ply')
    segments_file = os.path.join(scene_path, f'{scene_id}{CLOUD_FILE_PFIX}{SEGMENTS_FILE_PFIX}')
    aggregations_file = os.path.join(scene_path, f'{scene_id}{AGGREGATIONS_FILE_PFIX}')
    info_file = os.path.join(scene_path, f'{scene_id}.txt')

    norm_suffix = '_normalized' if _NORMALIZE else ''
    if scene_id in _TRAIN_SCENES:
        split_name = 'train'
    elif scene_id in _VAL_SCENES:
        split_name = 'val'
    else:
        split_name = 'test'
    output_file = os.path.join(_OUTPUT_ROOT, split_name, f'{scene_id}{norm_suffix}.ply')
    voxel_output_file = os.path.join(_OUTPUT_ROOT, split_name, f'{scene_id}_voxel_{_VOXEL_SIZE}{norm_suffix}.ply')
    print(f'Processing: {scene_id} in {split_name}')
|
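    # Parse the scene metadata file ("key = value" per line); axisAlignment,
    # when present, holds a row-major 4x4 transform that axis-aligns the scan.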
    info_dict = {}
    with open(info_file) as f:
        for line in f:
            key, val = line.split(" = ", 1)
            info_dict[key] = np.fromstring(val, sep=' ')
|
    if 'axisAlignment' not in info_dict:
        rot_matrix = np.identity(4)
    else:
        rot_matrix = info_dict['axisAlignment'].reshape(4, 4)
|
    mesh_data = read_plymesh(mesh_path)
    if mesh_data is None:
        raise ValueError(f'Empty mesh: {mesh_path}')
    pointcloud, faces_array = mesh_data
|
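    # Rotate the xyz coordinates into the axis-aligned frame via homogeneous
    # coordinates; colors and any extra per-vertex channels are kept as-is.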
    r_points = pointcloud[:, :3].transpose()
    r_points = np.append(r_points, np.ones((1, r_points.shape[1])), axis=0)
    r_points = np.dot(rot_matrix, r_points)
    pointcloud = np.append(r_points.transpose()[:, :3], pointcloud[:, 3:], axis=1)
|
    if _NORMALIZE:
        pointcloud[:, :3] = normalize_pointcloud(pointcloud[:, :3])
|
    points = pointcloud[:, :3]
    colors = pointcloud[:, 3:6]
|
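    # Load the over-segmentation: segIndices assigns every vertex a segment id.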
    with open(segments_file) as f:
        segments = json.load(f)
    seg_indices = np.array(segments['segIndices'])
|
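    # Load the instance aggregation: each segGroup is one annotated object
    # instance, described as a set of segment ids.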
    with open(aggregations_file) as f:
        aggregation = json.load(f)
    seg_groups = np.array(aggregation['segGroups'])
|
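    # Scatter each group's semantic label and instance id onto its member
    # points; points outside every group keep label 0 and instance id 0.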
    labelled_pc = np.zeros((pointcloud.shape[0], 1))
    instance_ids = np.zeros((pointcloud.shape[0], 1))
    for group in seg_groups:
        p_inds, label_id = point_indices_from_group(seg_indices, group, _LABEL_MAP, CLASS_IDs)
        labelled_pc[p_inds] = label_id
        instance_ids[p_inds] = group['id']

    labelled_pc = labelled_pc.astype(int)
    instance_ids = instance_ids.astype(int)
|
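    # Assemble per-vertex rows: x y z r g b semantic_label instance_id.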
    processed_vertices = np.hstack((pointcloud[:, :6], labelled_pc, instance_ids))

    # np.isfinite is False for NaN as well as +/-inf, so one check suffices.
    if not np.all(np.isfinite(processed_vertices)):
        raise ValueError(f'Scene {scene_id} contains NaN or non-finite vertex values')
|
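    # Save the full-resolution labelled mesh for this scene's split.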
    save_plymesh(processed_vertices, faces_array, output_file, with_label=True, verbose=False)
|
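    # Voxelize the labelled cloud and save the downsampled version alongside
    # the full-resolution one.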
    quantized_points, quantized_scene_colors, quantized_labels, quantized_instances = voxelize_pointcloud(
        points,
        colors,
        labelled_pc,
        instance_ids,
        faces_array,
        voxel_size=_VOXEL_SIZE,
    )
    quantized_pc = np.hstack((quantized_points, quantized_scene_colors, quantized_labels, quantized_instances))
    save_plymesh(quantized_pc, faces=None, filename=voxel_output_file, with_label=True, verbose=False)
|
|
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_root', required=True, help='Path to the ScanNet dataset containing scene folders')
    parser.add_argument('--output_root', required=True, help='Output path where the train/val/test folders will be created')
    parser.add_argument('--label_map_file', required=True, help='Path to scannetv2-labels.combined.tsv')
    parser.add_argument('--num_workers', default=4, type=int, help='The number of parallel workers')
    parser.add_argument('--train_val_splits_path', required=True, help='Directory containing the scannetv2_train.txt and scannetv2_val.txt split files')
    parser.add_argument('--voxel_size', default=0.2, type=float, help='Size of the voxel for voxelization')
    parser.add_argument('--normalize_pointcloud', action='store_true', help='Normalize each scene point cloud to a unit sphere after axis alignment')
    config = parser.parse_args()
|
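    # Build the raw-category -> label-id mapping from the ScanNet label TSV.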
    labels_pd = pd.read_csv(config.label_map_file, sep='\t', header=0)
    label_map = dict(zip(labels_pd['raw_category'], labels_pd['id']))
|
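    # Read the train/val scene lists; any scene in neither list is treated as test.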
    with open(os.path.join(config.train_val_splits_path, 'scannetv2_train.txt')) as train_file:
        train_scenes = train_file.read().splitlines()
    with open(os.path.join(config.train_val_splits_path, 'scannetv2_val.txt')) as val_file:
        val_scenes = val_file.read().splitlines()
|
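    # Create the output directories for all three splits up front.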
    for split in ('train', 'val', 'test'):
        os.makedirs(os.path.join(config.output_root, split), exist_ok=True)
|
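    # Collect the per-scene folders to process.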
    scene_paths = sorted(glob.glob(os.path.join(config.dataset_root, '*')))
|
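    # Fan the scenes out across worker processes; init_worker() seeds each
    # worker's globals once, and list() forces iteration so that any worker
    # exception is re-raised here instead of being silently dropped.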
    print('Processing scenes...')
    with ProcessPoolExecutor(
        max_workers=config.num_workers,
        initializer=init_worker,
        initargs=(
            config.output_root,
            train_scenes,
            val_scenes,
            config.voxel_size,
            config.normalize_pointcloud,
            label_map,
        ),
    ) as pool:
        _ = list(pool.map(handle_process, scene_paths))