Commit 203a301 (parent 28e13bd) by mattdeitke

add rendering script
.gitignore CHANGED
@@ -1,3 +1,5 @@
+scripts/rendering/blender-*
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
Makefile CHANGED
@@ -10,10 +10,14 @@ lint: ## [Local development] Run mypy, pylint and black
 	python -m pylint objaverse_xl
 	python -m black --check objaverse_xl
 	python -m isort --check-only objaverse_xl
+	python -m black --check scripts --exclude scripts/rendering/blender-3.2.2-linux-x64/
+	python -m isort --check-only scripts/**/*.py --skip scripts/rendering/blender-3.2.2-linux-x64/
 
-format: ## [Local development] Auto-format python code using black
+format: ## [Local development] Auto-format python code using black, don't include blender
 	python -m isort objaverse_xl
 	python -m black objaverse_xl
+	python -m isort scripts/**/*.py --skip scripts/rendering/blender-3.2.2-linux-x64/
+	python -m black scripts --exclude scripts/rendering/blender-3.2.2-linux-x64/
 
 test: ## [Local development] Run unit tests
 	python -m pytest -x -s -v tests
requirements.txt CHANGED
@@ -3,4 +3,5 @@ pandas
 pyarrow
 tqdm
 loguru
-fsspec>=2022.11.0
+fsspec>=2022.11.0
+gputil==1.4.0
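The new `gputil` pin is what `scripts/rendering/main.py` uses to count the GPUs available for rendering via `GPUtil.getGPUs()`. A minimal sketch of that check in isolation, assuming an NVIDIA machine where `nvidia-smi` is on the PATH (GPUtil shells out to it):

```python
# Editorial sketch (not part of the commit): GPU detection as used by main.py.
import GPUtil

gpus = GPUtil.getGPUs()  # one entry per detected NVIDIA device
print(f"Detected {len(gpus)} GPU(s)")
for gpu in gpus:
    print(gpu.id, gpu.name, f"{gpu.memoryTotal} MB")
```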
scripts/rendering/README.md ADDED
@@ -0,0 +1,43 @@
# Objaverse-XL Rendering Script

## Setup

1. Clone the repository and enter the rendering directory:

```bash
git clone https://github.com/allenai/objaverse-xl.git && \
cd objaverse-xl/scripts/rendering
```

2. Download Blender:

```bash
wget https://download.blender.org/release/Blender3.2/blender-3.2.2-linux-x64.tar.xz && \
tar -xf blender-3.2.2-linux-x64.tar.xz && \
rm blender-3.2.2-linux-x64.tar.xz
```

3. If you're on a headless Linux server, install Xorg and start it:

```bash
sudo apt-get install xserver-xorg -y && \
sudo python3 start_x_server.py start
```

4. Install the Python dependencies. Note that Python >3.8 is required:

```bash
cd ../.. && \
pip install -r requirements.txt && \
pip install -e . && \
cd scripts/rendering
```

## Usage

After setup, you can render objects using the `main.py` script:

```bash
python3 main.py
```
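Under the hood, `main.py` renders each object by launching the bundled Blender with `blender_script.py` in background mode. A hedged sketch of the equivalent one-off invocation from Python, mirroring the command string `main.py` builds; the object path and output directory below are placeholders, not files shipped with the repo:

```python
# Editorial sketch: render a single local object by calling Blender directly.
import subprocess

blender = "blender-3.2.2-linux-x64/blender"
cmd = [
    blender, "--background", "--python", "blender_script.py", "--",
    "--object_path", "example.glb",      # placeholder input model
    "--output_dir", "renders/example",   # placeholder output directory
    "--num_renders", "12",
    "--engine", "BLENDER_EEVEE",         # use CYCLES on macOS or CPU-only machines
]
subprocess.run(cmd, check=True, timeout=300)
```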
scripts/rendering/blender_script.py ADDED
@@ -0,0 +1,899 @@
1
+ """Blender script to render images of 3D models."""
2
+
3
+ import argparse
4
+ import json
5
+ import math
6
+ import os
7
+ import random
8
+ import sys
9
+ from typing import Any, Callable, Dict, Generator, List, Literal, Optional, Set, Tuple
10
+
11
+ import bpy
12
+ import numpy as np
13
+ from mathutils import Matrix, Vector
14
+
15
+ IMPORT_FUNCTIONS: Dict[str, Callable] = {
16
+ "obj": bpy.ops.import_scene.obj,
17
+ "glb": bpy.ops.import_scene.gltf,
18
+ "gltf": bpy.ops.import_scene.gltf,
19
+ "usd": bpy.ops.import_scene.usd,
20
+ "fbx": bpy.ops.import_scene.fbx,
21
+ "stl": bpy.ops.import_mesh.stl,
22
+ "usda": bpy.ops.import_scene.usda,
23
+ "dae": bpy.ops.wm.collada_import,
24
+ "ply": bpy.ops.import_mesh.ply,
25
+ "abc": bpy.ops.wm.alembic_import,
26
+ "blend": bpy.ops.wm.append,
27
+ }
28
+
29
+
30
+ def reset_cameras() -> None:
31
+ """Resets the cameras in the scene to a single default camera."""
32
+ # Delete all existing cameras
33
+ bpy.ops.object.select_all(action="DESELECT")
34
+ bpy.ops.object.select_by_type(type="CAMERA")
35
+ bpy.ops.object.delete()
36
+
37
+ # Create a new camera with default properties
38
+ bpy.ops.object.camera_add()
39
+
40
+ # Rename the new camera to 'NewDefaultCamera'
41
+ new_camera = bpy.context.active_object
42
+ new_camera.name = "Camera"
43
+
44
+ # Set the new camera as the active camera for the scene
45
+ scene.camera = new_camera
46
+
47
+
48
+ def sample_point_on_sphere(radius: float) -> Tuple[float, float, float]:
49
+ """Samples a point on a sphere with the given radius.
50
+
51
+ Args:
52
+ radius (float): Radius of the sphere.
53
+
54
+ Returns:
55
+ Tuple[float, float, float]: A point on the sphere.
56
+ """
57
+ theta = random.random() * 2 * math.pi
58
+ phi = math.acos(2 * random.random() - 1)
59
+ return (
60
+ radius * math.sin(phi) * math.cos(theta),
61
+ radius * math.sin(phi) * math.sin(theta),
62
+ radius * math.cos(phi),
63
+ )
64
+
65
+
66
+ def _sample_spherical(
67
+ radius_min: float = 1.5,
68
+ radius_max: float = 2.0,
69
+ maxz: float = 1.6,
70
+ minz: float = -0.75,
71
+ ) -> np.ndarray:
72
+ """Sample a random point in a spherical shell.
73
+
74
+ Args:
75
+ radius_min (float): Minimum radius of the spherical shell.
76
+ radius_max (float): Maximum radius of the spherical shell.
77
+ maxz (float): Maximum z value of the spherical shell.
78
+ minz (float): Minimum z value of the spherical shell.
79
+
80
+ Returns:
81
+ np.ndarray: A random (x, y, z) point in the spherical shell.
82
+ """
83
+ correct = False
84
+ vec = np.array([0, 0, 0])
85
+ while not correct:
86
+ vec = np.random.uniform(-1, 1, 3)
87
+ # vec[2] = np.abs(vec[2])
88
+ radius = np.random.uniform(radius_min, radius_max, 1)
89
+ vec = vec / np.linalg.norm(vec, axis=0) * radius[0]
90
+ if maxz > vec[2] > minz:
91
+ correct = True
92
+ return vec
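The helper above rejection-samples a point in a spherical shell and constrains its height. A standalone re-implementation (no `bpy` required) makes the invariants easy to check outside Blender; this is illustrative only, using the function's own default parameters, and is not part of the script:

```python
# Editorial sketch: verify the shell-sampling invariants with plain numpy.
import numpy as np

def sample_spherical(radius_min=1.5, radius_max=2.0, maxz=1.6, minz=-0.75):
    while True:
        vec = np.random.uniform(-1, 1, 3)
        radius = np.random.uniform(radius_min, radius_max)
        vec = vec / np.linalg.norm(vec) * radius   # point at distance `radius`
        if maxz > vec[2] > minz:                   # reject points outside the z band
            return vec

pts = np.stack([sample_spherical() for _ in range(1000)])
norms = np.linalg.norm(pts, axis=1)
assert norms.min() >= 1.5 - 1e-6 and norms.max() <= 2.0 + 1e-6
assert pts[:, 2].max() < 1.6 and pts[:, 2].min() > -0.75
```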
93
+
94
+
95
+ def randomize_camera(
96
+ radius_min: float = 1.5,
97
+ radius_max: float = 2.2,
98
+ maxz: float = 2.2,
99
+ minz: float = -2.2,
100
+ only_northern_hemisphere: bool = False,
101
+ ) -> bpy.types.Object:
102
+ """Randomizes the camera location and rotation inside of a spherical shell.
103
+
104
+ Args:
105
+ radius_min (float, optional): Minimum radius of the spherical shell. Defaults to
106
+ 1.5.
107
+ radius_max (float, optional): Maximum radius of the spherical shell. Defaults to
108
+ 2.0.
109
+ maxz (float, optional): Maximum z value of the spherical shell. Defaults to 1.6.
110
+ minz (float, optional): Minimum z value of the spherical shell. Defaults to
111
+ -0.75.
112
+ only_northern_hemisphere (bool, optional): Whether to only sample points in the
113
+ northern hemisphere. Defaults to False.
114
+
115
+ Returns:
116
+ bpy.types.Object: The camera object.
117
+ """
118
+
119
+ x, y, z = _sample_spherical(
120
+ radius_min=radius_min, radius_max=radius_max, maxz=maxz, minz=minz
121
+ )
122
+ camera = bpy.data.objects["Camera"]
123
+
124
+ # only positive z
125
+ if only_northern_hemisphere:
126
+ z = abs(z)
127
+
128
+ camera.location = Vector(np.array([x, y, z]))
129
+
130
+ direction = -camera.location
131
+ rot_quat = direction.to_track_quat("-Z", "Y")
132
+ camera.rotation_euler = rot_quat.to_euler()
133
+
134
+ return camera
135
+
136
+
137
+ def _set_camera_at_size(i: int, scale: float = 1.5) -> bpy.types.Object:
138
+ """Debugging function to set the camera on the 6 faces of a cube.
139
+
140
+ Args:
141
+ i (int): Index of the face of the cube.
142
+ scale (float, optional): Scale of the cube. Defaults to 1.5.
143
+
144
+ Returns:
145
+ bpy.types.Object: The camera object.
146
+ """
147
+ if i == 0:
148
+ x, y, z = scale, 0, 0
149
+ elif i == 1:
150
+ x, y, z = -scale, 0, 0
151
+ elif i == 2:
152
+ x, y, z = 0, scale, 0
153
+ elif i == 3:
154
+ x, y, z = 0, -scale, 0
155
+ elif i == 4:
156
+ x, y, z = 0, 0, scale
157
+ elif i == 5:
158
+ x, y, z = 0, 0, -scale
159
+ else:
160
+ raise ValueError(f"Invalid index: i={i}, must be int in range [0, 5].")
161
+ camera = bpy.data.objects["Camera"]
162
+ camera.location = Vector(np.array([x, y, z]))
163
+ direction = -camera.location
164
+ rot_quat = direction.to_track_quat("-Z", "Y")
165
+ camera.rotation_euler = rot_quat.to_euler()
166
+ return camera
167
+
168
+
169
+ def _create_light(
170
+ name: str,
171
+ light_type: Literal["POINT", "SUN", "SPOT", "AREA"],
172
+ location: Tuple[float, float, float],
173
+ rotation: Tuple[float, float, float],
174
+ energy: float,
175
+ use_shadow: bool = False,
176
+ specular_factor: float = 1.0,
177
+ ):
178
+ """Creates a light object.
179
+
180
+ Args:
181
+ name (str): Name of the light object.
182
+ light_type (Literal["POINT", "SUN", "SPOT", "AREA"]): Type of the light.
183
+ location (Tuple[float, float, float]): Location of the light.
184
+ rotation (Tuple[float, float, float]): Rotation of the light.
185
+ energy (float): Energy of the light.
186
+ use_shadow (bool, optional): Whether to use shadows. Defaults to False.
187
+ specular_factor (float, optional): Specular factor of the light. Defaults to 1.0.
188
+
189
+ Returns:
190
+ bpy.types.Object: The light object.
191
+ """
192
+
193
+ light_data = bpy.data.lights.new(name=name, type=light_type)
194
+ light_object = bpy.data.objects.new(name, light_data)
195
+ bpy.context.collection.objects.link(light_object)
196
+ light_object.location = location
197
+ light_object.rotation_euler = rotation
198
+ light_data.use_shadow = use_shadow
199
+ light_data.specular_factor = specular_factor
200
+ light_data.energy = energy
201
+ return light_object
202
+
203
+
204
+ def randomize_lighting() -> Dict[str, bpy.types.Object]:
205
+ """Randomizes the lighting in the scene.
206
+
207
+ Returns:
208
+ Dict[str, bpy.types.Object]: Dictionary of the lights in the scene. The keys are
209
+ "key_light", "fill_light", "rim_light", and "bottom_light".
210
+ """
211
+
212
+ # Clear existing lights
213
+ bpy.ops.object.select_all(action="DESELECT")
214
+ bpy.ops.object.select_by_type(type="LIGHT")
215
+ bpy.ops.object.delete()
216
+
217
+ # Create key light
218
+ key_light = _create_light(
219
+ name="Key_Light",
220
+ light_type="SUN",
221
+ location=(0, 0, 0),
222
+ rotation=(0.785398, 0, -0.785398),
223
+ energy=random.choice([3, 4, 5]),
224
+ )
225
+
226
+ # Create fill light
227
+ fill_light = _create_light(
228
+ name="Fill_Light",
229
+ light_type="SUN",
230
+ location=(0, 0, 0),
231
+ rotation=(0.785398, 0, 2.35619),
232
+ energy=random.choice([2, 3, 4]),
233
+ )
234
+
235
+ # Create rim light
236
+ rim_light = _create_light(
237
+ name="Rim_Light",
238
+ light_type="SUN",
239
+ location=(0, 0, 0),
240
+ rotation=(-0.785398, 0, -3.92699),
241
+ energy=random.choice([3, 4, 5]),
242
+ )
243
+
244
+ # Create bottom light
245
+ bottom_light = _create_light(
246
+ name="Bottom_Light",
247
+ light_type="SUN",
248
+ location=(0, 0, 0),
249
+ rotation=(3.14159, 0, 0),
250
+ energy=random.choice([1, 2, 3]),
251
+ )
252
+
253
+ return dict(
254
+ key_light=key_light,
255
+ fill_light=fill_light,
256
+ rim_light=rim_light,
257
+ bottom_light=bottom_light,
258
+ )
259
+
260
+
261
+ def reset_scene() -> None:
262
+ """Resets the scene to a clean state.
263
+
264
+ Returns:
265
+ None
266
+ """
267
+ # delete everything that isn't part of a camera or a light
268
+ for obj in bpy.data.objects:
269
+ if obj.type not in {"CAMERA", "LIGHT"}:
270
+ bpy.data.objects.remove(obj, do_unlink=True)
271
+
272
+ # delete all the materials
273
+ for material in bpy.data.materials:
274
+ bpy.data.materials.remove(material, do_unlink=True)
275
+
276
+ # delete all the textures
277
+ for texture in bpy.data.textures:
278
+ bpy.data.textures.remove(texture, do_unlink=True)
279
+
280
+ # delete all the images
281
+ for image in bpy.data.images:
282
+ bpy.data.images.remove(image, do_unlink=True)
283
+
284
+
285
+ def load_object(object_path: str) -> None:
286
+ """Loads a model with a supported file extension into the scene.
287
+
288
+ Args:
289
+ object_path (str): Path to the model file.
290
+
291
+ Raises:
292
+ ValueError: If the file extension is not supported.
293
+
294
+ Returns:
295
+ None
296
+ """
297
+ file_extension = object_path.split(".")[-1].lower()
298
+ if file_extension is None:
299
+ raise ValueError(f"Unsupported file type: {object_path}")
300
+
301
+ if file_extension == "usdz":
302
+ # install usdz io package
303
+ dirname = os.path.dirname(os.path.realpath(__file__))
304
+ usdz_package = os.path.join(dirname, "io_scene_usdz.zip")
305
+ bpy.ops.preferences.addon_install(filepath=usdz_package)
306
+ # enable it
307
+ addon_name = "io_scene_usdz"
308
+ bpy.ops.preferences.addon_enable(module=addon_name)
309
+ # import the usdz
310
+ from io_scene_usdz.import_usdz import import_usdz
311
+
312
+ import_usdz(context, filepath=object_path, materials=True, animations=True)
313
+ return None
314
+
315
+ # load from existing import functions
316
+ import_function = IMPORT_FUNCTIONS[file_extension]
317
+
318
+ if file_extension == "blend":
319
+ import_function(directory=object_path, link=False)
320
+ elif file_extension in {"glb", "gltf"}:
321
+ import_function(filepath=object_path, merge_vertices=True)
322
+ else:
323
+ import_function(filepath=object_path)
324
+
325
+
326
+ def scene_bbox(
327
+ single_obj: Optional[bpy.types.Object] = None, ignore_matrix: bool = False
328
+ ) -> Tuple[Vector, Vector]:
329
+ """Returns the bounding box of the scene.
330
+
331
+ Taken from Shap-E rendering script
332
+ (https://github.com/openai/shap-e/blob/main/shap_e/rendering/blender/blender_script.py#L68-L82)
333
+
334
+ Args:
335
+ single_obj (Optional[bpy.types.Object], optional): If not None, only computes
336
+ the bounding box for the given object. Defaults to None.
337
+ ignore_matrix (bool, optional): Whether to ignore the object's matrix. Defaults
338
+ to False.
339
+
340
+ Raises:
341
+ RuntimeError: If there are no objects in the scene.
342
+
343
+ Returns:
344
+ Tuple[Vector, Vector]: The minimum and maximum coordinates of the bounding box.
345
+ """
346
+ bbox_min = (math.inf,) * 3
347
+ bbox_max = (-math.inf,) * 3
348
+ found = False
349
+ for obj in get_scene_meshes() if single_obj is None else [single_obj]:
350
+ found = True
351
+ for coord in obj.bound_box:
352
+ coord = Vector(coord)
353
+ if not ignore_matrix:
354
+ coord = obj.matrix_world @ coord
355
+ bbox_min = tuple(min(x, y) for x, y in zip(bbox_min, coord))
356
+ bbox_max = tuple(max(x, y) for x, y in zip(bbox_max, coord))
357
+
358
+ if not found:
359
+ raise RuntimeError("no objects in scene to compute bounding box for")
360
+
361
+ return Vector(bbox_min), Vector(bbox_max)
362
+
363
+
364
+ def get_scene_root_objects() -> Generator[bpy.types.Object, None, None]:
365
+ """Returns all root objects in the scene.
366
+
367
+ Yields:
368
+ Generator[bpy.types.Object, None, None]: Generator of all root objects in the
369
+ scene.
370
+ """
371
+ for obj in bpy.context.scene.objects.values():
372
+ if not obj.parent:
373
+ yield obj
374
+
375
+
376
+ def get_scene_meshes() -> Generator[bpy.types.Object, None, None]:
377
+ """Returns all meshes in the scene.
378
+
379
+ Yields:
380
+ Generator[bpy.types.Object, None, None]: Generator of all meshes in the scene.
381
+ """
382
+ for obj in bpy.context.scene.objects.values():
383
+ if isinstance(obj.data, (bpy.types.Mesh)):
384
+ yield obj
385
+
386
+
387
+ def get_3x4_RT_matrix_from_blender(cam: bpy.types.Object) -> Matrix:
388
+ """Returns the 3x4 RT matrix from the given camera.
389
+
390
+ Taken from Zero123, which in turn was taken from
391
+ https://github.com/panmari/stanford-shapenet-renderer/blob/master/render_blender.py
392
+
393
+ Args:
394
+ cam (bpy.types.Object): The camera object.
395
+
396
+ Returns:
397
+ Matrix: The 3x4 RT matrix from the given camera.
398
+ """
399
+ # Use matrix_world instead to account for all constraints
400
+ location, rotation = cam.matrix_world.decompose()[0:2]
401
+ R_world2bcam = rotation.to_matrix().transposed()
402
+
403
+ # Use location from matrix_world to account for constraints:
404
+ T_world2bcam = -1 * R_world2bcam @ location
405
+
406
+ # put into 3x4 matrix
407
+ RT = Matrix(
408
+ (
409
+ R_world2bcam[0][:] + (T_world2bcam[0],),
410
+ R_world2bcam[1][:] + (T_world2bcam[1],),
411
+ R_world2bcam[2][:] + (T_world2bcam[2],),
412
+ )
413
+ )
414
+ return RT
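The 3x4 matrix returned here is what gets saved as `{i:03d}.npy` next to each render. A hedged sketch of consuming one of those files, assuming the render settings used later in this script (512x512 output, 35 mm lens on a 32 mm-wide sensor) and Blender's convention that the camera looks down its local -Z axis:

```python
# Editorial sketch: project a world-space point into a saved 512x512 render.
import numpy as np

rt = np.load("000.npy")                       # 3x4 world-to-camera matrix
point_world = np.array([0.0, 0.0, 0.0, 1.0])  # homogeneous world point (origin)

x_cam, y_cam, z_cam = rt @ point_world        # coordinates in the camera frame

width = height = 512
focal_px = 35 / 32 * width                    # lens / sensor_width * resolution
u = width / 2 + focal_px * x_cam / -z_cam     # visible points have z_cam < 0
v = height / 2 - focal_px * y_cam / -z_cam    # image rows increase downwards
print(f"pixel coordinates: ({u:.1f}, {v:.1f})")
```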
415
+
416
+
417
+ def delete_invisible_objects() -> None:
418
+ """Deletes all invisible objects in the scene.
419
+
420
+ Returns:
421
+ None
422
+ """
423
+ bpy.ops.object.select_all(action="DESELECT")
424
+ for obj in scene.objects:
425
+ if obj.hide_viewport or obj.hide_render:
426
+ obj.hide_viewport = False
427
+ obj.hide_render = False
428
+ obj.hide_select = False
429
+ obj.select_set(True)
430
+ bpy.ops.object.delete()
431
+
432
+ # Delete invisible collections
433
+ invisible_collections = [col for col in bpy.data.collections if col.hide_viewport]
434
+ for col in invisible_collections:
435
+ bpy.data.collections.remove(col)
436
+
437
+
438
+ def normalize_scene() -> None:
439
+ """Normalizes the scene by scaling and translating it to fit in a unit cube centered
440
+ at the origin.
441
+
442
+ Mostly taken from the Point-E / Shap-E rendering script
443
+ (https://github.com/openai/point-e/blob/main/point_e/evals/scripts/blender_script.py#L97-L112),
444
+ but fix for multiple root objects: (see bug report here:
445
+ https://github.com/openai/shap-e/pull/60).
446
+
447
+ Returns:
448
+ None
449
+ """
450
+ if len(list(get_scene_root_objects())) > 1:
451
+ # create an empty object to be used as a parent for all root objects
452
+ parent_empty = bpy.data.objects.new("ParentEmpty", None)
453
+ bpy.context.scene.collection.objects.link(parent_empty)
454
+
455
+ # parent all root objects to the empty object
456
+ for obj in get_scene_root_objects():
457
+ if obj != parent_empty:
458
+ obj.parent = parent_empty
459
+
460
+ bbox_min, bbox_max = scene_bbox()
461
+ scale = 1 / max(bbox_max - bbox_min)
462
+ for obj in get_scene_root_objects():
463
+ obj.scale = obj.scale * scale
464
+
465
+ # Apply scale to matrix_world.
466
+ bpy.context.view_layer.update()
467
+ bbox_min, bbox_max = scene_bbox()
468
+ offset = -(bbox_min + bbox_max) / 2
469
+ for obj in get_scene_root_objects():
470
+ obj.matrix_world.translation += offset
471
+ bpy.ops.object.select_all(action="DESELECT")
472
+
473
+ # unparent the camera
474
+ bpy.data.objects["Camera"].parent = None
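Because the scaling and recentering above happen in two separate passes over `scene_bbox()`, here is the same arithmetic on a toy bounding box in plain numpy, as an illustration of why the longest side ends up exactly 1 and the box ends up centred at the origin; the numbers are made up for the example:

```python
# Editorial sketch: the normalize_scene() arithmetic on a toy bounding box.
import numpy as np

bbox_min = np.array([-0.3, -1.0, 0.0])
bbox_max = np.array([0.7, 3.0, 2.0])

scale = 1.0 / np.max(bbox_max - bbox_min)      # longest side becomes 1
bbox_min, bbox_max = bbox_min * scale, bbox_max * scale

offset = -(bbox_min + bbox_max) / 2.0          # recenter after scaling
bbox_min, bbox_max = bbox_min + offset, bbox_max + offset

assert np.isclose(np.max(bbox_max - bbox_min), 1.0)
assert np.allclose(bbox_min + bbox_max, 0.0)
```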
475
+
476
+
477
+ def delete_missing_textures() -> Dict[str, Any]:
478
+ """Deletes all missing textures in the scene.
479
+
480
+ Returns:
481
+ Dict[str, Any]: Dictionary with keys "count", "files", and "file_path_to_color".
482
+ "count" is the number of missing textures, "files" is a list of the missing
483
+ texture file paths, and "file_path_to_color" is a dictionary mapping the
484
+ missing texture file paths to a random color.
485
+ """
486
+ missing_file_count = 0
487
+ out_files = []
488
+ file_path_to_color = {}
489
+
490
+ # Check all materials in the scene
491
+ for material in bpy.data.materials:
492
+ if material.use_nodes:
493
+ for node in material.node_tree.nodes:
494
+ if node.type == "TEX_IMAGE":
495
+ image = node.image
496
+ if image is not None:
497
+ file_path = bpy.path.abspath(image.filepath)
498
+ if file_path == "":
499
+ # means it's embedded
500
+ continue
501
+
502
+ if not os.path.exists(file_path):
503
+ # Find the connected Principled BSDF node
504
+ connected_node = node.outputs[0].links[0].to_node
505
+
506
+ if connected_node.type == "BSDF_PRINCIPLED":
507
+ if file_path not in file_path_to_color:
508
+ # Set a random color for the unique missing file path
509
+ random_color = [random.random() for _ in range(3)]
510
+ file_path_to_color[file_path] = random_color + [1]
511
+
512
+ connected_node.inputs[
513
+ "Base Color"
514
+ ].default_value = file_path_to_color[file_path]
515
+
516
+ # Delete the TEX_IMAGE node
517
+ material.node_tree.nodes.remove(node)
518
+ missing_file_count += 1
519
+ out_files.append(image.filepath)
520
+ return {
521
+ "count": missing_file_count,
522
+ "files": out_files,
523
+ "file_path_to_color": file_path_to_color,
524
+ }
525
+
526
+
527
+ def _get_random_color() -> Tuple[float, float, float, float]:
528
+ """Generates a random RGB-A color.
529
+
530
+ The alpha value is always 1.
531
+
532
+ Returns:
533
+ Tuple[float, float, float, float]: A random RGB-A color. Each value is in the
534
+ range [0, 1].
535
+ """
536
+ return (random.random(), random.random(), random.random(), 1)
537
+
538
+
539
+ def _apply_color_to_object(
540
+ obj: bpy.types.Object, color: Tuple[float, float, float, float]
541
+ ) -> None:
542
+ """Applies the given color to the object.
543
+
544
+ Args:
545
+ obj (bpy.types.Object): The object to apply the color to.
546
+ color (Tuple[float, float, float, float]): The color to apply to the object.
547
+
548
+ Returns:
549
+ None
550
+ """
551
+ mat = bpy.data.materials.new(name=f"RandomMaterial_{obj.name}")
552
+ mat.use_nodes = True
553
+ nodes = mat.node_tree.nodes
554
+ principled_bsdf = nodes.get("Principled BSDF")
555
+ if principled_bsdf:
556
+ principled_bsdf.inputs["Base Color"].default_value = color
557
+ obj.data.materials.append(mat)
558
+
559
+
560
+ def apply_single_random_color_to_all_objects() -> Tuple[float, float, float, float]:
561
+ """Applies a single random color to all objects in the scene.
562
+
563
+ Returns:
564
+ Tuple[float, float, float, float]: The random color that was applied to all
565
+ objects.
566
+ """
567
+ rand_color = _get_random_color()
568
+ for obj in bpy.context.scene.objects:
569
+ if obj.type == "MESH":
570
+ _apply_color_to_object(obj, rand_color)
571
+ return rand_color
572
+
573
+
574
+ class MetadataExtractor:
575
+ """Class to extract metadata from a Blender scene."""
576
+
577
+ def __init__(
578
+ self, object_path: str, scene: bpy.types.Scene, bdata: bpy.types.BlendData
579
+ ) -> None:
580
+ """Initializes the MetadataExtractor.
581
+
582
+ Args:
583
+ object_path (str): Path to the object file.
584
+ scene (bpy.types.Scene): The current scene object from `bpy.context.scene`.
585
+ bdata (bpy.types.BlendData): The current blender data from `bpy.data`.
586
+
587
+ Returns:
588
+ None
589
+ """
590
+ self.object_path = object_path
591
+ self.scene = scene
592
+ self.bdata = bdata
593
+
594
+ def get_poly_count(self) -> int:
595
+ """Returns the total number of polygons in the scene."""
596
+ total_poly_count = 0
597
+ for obj in self.scene.objects:
598
+ if obj.type == "MESH":
599
+ total_poly_count += len(obj.data.polygons)
600
+ return total_poly_count
601
+
602
+ def get_vertex_count(self) -> int:
603
+ """Returns the total number of vertices in the scene."""
604
+ total_vertex_count = 0
605
+ for obj in self.scene.objects:
606
+ if obj.type == "MESH":
607
+ total_vertex_count += len(obj.data.vertices)
608
+ return total_vertex_count
609
+
610
+ def get_edge_count(self) -> int:
611
+ """Returns the total number of edges in the scene."""
612
+ total_edge_count = 0
613
+ for obj in self.scene.objects:
614
+ if obj.type == "MESH":
615
+ total_edge_count += len(obj.data.edges)
616
+ return total_edge_count
617
+
618
+ def get_lamp_count(self) -> int:
619
+ """Returns the number of lamps in the scene."""
620
+ return sum(1 for obj in self.scene.objects if obj.type == "LIGHT")
621
+
622
+ def get_mesh_count(self) -> int:
623
+ """Returns the number of meshes in the scene."""
624
+ return sum(1 for obj in self.scene.objects if obj.type == "MESH")
625
+
626
+ def get_material_count(self) -> int:
627
+ """Returns the number of materials in the scene."""
628
+ return len(self.bdata.materials)
629
+
630
+ def get_object_count(self) -> int:
631
+ """Returns the number of objects in the scene."""
632
+ return len(self.bdata.objects)
633
+
634
+ def get_animation_count(self) -> int:
635
+ """Returns the number of animations in the scene."""
636
+ return len(self.bdata.actions)
637
+
638
+ def get_linked_files(self) -> List[str]:
639
+ """Returns the filepaths of all linked files."""
640
+ image_filepaths = self._get_image_filepaths()
641
+ material_filepaths = self._get_material_filepaths()
642
+ linked_libraries_filepaths = self._get_linked_libraries_filepaths()
643
+
644
+ all_filepaths = (
645
+ image_filepaths | material_filepaths | linked_libraries_filepaths
646
+ )
647
+ if "" in all_filepaths:
648
+ all_filepaths.remove("")
649
+ return list(all_filepaths)
650
+
651
+ def _get_image_filepaths(self) -> Set[str]:
652
+ """Returns the filepaths of all images used in the scene."""
653
+ filepaths = set()
654
+ for image in self.bdata.images:
655
+ if image.source == "FILE":
656
+ filepaths.add(bpy.path.abspath(image.filepath))
657
+ return filepaths
658
+
659
+ def _get_material_filepaths(self) -> Set[str]:
660
+ """Returns the filepaths of all images used in materials."""
661
+ filepaths = set()
662
+ for material in self.bdata.materials:
663
+ if material.use_nodes:
664
+ for node in material.node_tree.nodes:
665
+ if node.type == "TEX_IMAGE":
666
+ image = node.image
667
+ if image is not None:
668
+ filepaths.add(bpy.path.abspath(image.filepath))
669
+ return filepaths
670
+
671
+ def _get_linked_libraries_filepaths(self) -> Set[str]:
672
+ """Returns the filepaths of all linked libraries."""
673
+ filepaths = set()
674
+ for library in self.bdata.libraries:
675
+ filepaths.add(bpy.path.abspath(library.filepath))
676
+ return filepaths
677
+
678
+ def get_scene_size(self) -> Dict[str, list]:
679
+ """Returns the size of the scene bounds in meters."""
680
+ bbox_min, bbox_max = scene_bbox()
681
+ return {"bbox_max": list(bbox_max), "bbox_min": list(bbox_min)}
682
+
683
+ def get_shape_key_count(self) -> int:
684
+ """Returns the number of shape keys in the scene."""
685
+ total_shape_key_count = 0
686
+ for obj in self.scene.objects:
687
+ if obj.type == "MESH":
688
+ shape_keys = obj.data.shape_keys
689
+ if shape_keys is not None:
690
+ total_shape_key_count += (
691
+ len(shape_keys.key_blocks) - 1
692
+ ) # Subtract 1 to exclude the Basis shape key
693
+ return total_shape_key_count
694
+
695
+ def get_armature_count(self) -> int:
696
+ """Returns the number of armatures in the scene."""
697
+ total_armature_count = 0
698
+ for obj in self.scene.objects:
699
+ if obj.type == "ARMATURE":
700
+ total_armature_count += 1
701
+ return total_armature_count
702
+
703
+ def read_file_size(self) -> int:
704
+ """Returns the size of the file in bytes."""
705
+ return os.path.getsize(self.object_path)
706
+
707
+ def get_metadata(self) -> Dict[str, Any]:
708
+ """Returns the metadata of the scene.
709
+
710
+ Returns:
711
+ Dict[str, Any]: Dictionary of the metadata with keys for "file_size",
712
+ "poly_count", "vert_count", "edge_count", "material_count", "object_count",
713
+ "lamp_count", "mesh_count", "animation_count", "linked_files", "scene_size",
714
+ "shape_key_count", and "armature_count".
715
+ """
716
+ return {
717
+ "file_size": self.read_file_size(),
718
+ "poly_count": self.get_poly_count(),
719
+ "vert_count": self.get_vertex_count(),
720
+ "edge_count": self.get_edge_count(),
721
+ "material_count": self.get_material_count(),
722
+ "object_count": self.get_object_count(),
723
+ "lamp_count": self.get_lamp_count(),
724
+ "mesh_count": self.get_mesh_count(),
725
+ "animation_count": self.get_animation_count(),
726
+ "linked_files": self.get_linked_files(),
727
+ "scene_size": self.get_scene_size(),
728
+ "shape_key_count": self.get_shape_key_count(),
729
+ "armature_count": self.get_armature_count(),
730
+ }
731
+
732
+
733
+ def render_object(
734
+ object_file: str,
735
+ num_renders: int,
736
+ only_northern_hemisphere: bool,
737
+ output_dir: str,
738
+ ) -> None:
739
+ """Saves rendered images of the object along with their camera matrices and metadata.
740
+
741
+ Args:
742
+ object_file (str): Path to the object file.
743
+ num_renders (int): Number of renders to save of the object.
744
+ only_northern_hemisphere (bool): Whether to only render sides of the object that
745
+ are in the northern hemisphere. This is useful for rendering objects that
746
+ are photogrammetrically scanned, as the bottom of the object often has
747
+ holes.
748
+ output_dir (str): Path to the directory where the rendered images and metadata
749
+ will be saved.
750
+
751
+ Returns:
752
+ None
753
+ """
754
+ os.makedirs(output_dir, exist_ok=True)
755
+
756
+ # load the object
757
+ if object_file.endswith(".blend"):
758
+ bpy.ops.object.mode_set(mode="OBJECT")
759
+ reset_cameras()
760
+ delete_invisible_objects()
761
+ else:
762
+ reset_scene()
763
+ load_object(object_file)
764
+
765
+ # Set up cameras
766
+ cam = scene.objects["Camera"]
767
+ cam.data.lens = 35
768
+ cam.data.sensor_width = 32
769
+
770
+ # Set up camera constraints
771
+ cam_constraint = cam.constraints.new(type="TRACK_TO")
772
+ cam_constraint.track_axis = "TRACK_NEGATIVE_Z"
773
+ cam_constraint.up_axis = "UP_Y"
774
+ empty = bpy.data.objects.new("Empty", None)
775
+ scene.collection.objects.link(empty)
776
+ cam_constraint.target = empty
777
+
778
+ # Extract the metadata. This must be done before normalizing the scene to get
779
+ # accurate bounding box information.
780
+ metadata_extractor = MetadataExtractor(
781
+ object_path=object_file, scene=scene, bdata=bpy.data
782
+ )
783
+ metadata = metadata_extractor.get_metadata()
784
+
785
+ # delete all objects that are not meshes
786
+ if object_file.lower().endswith(".usdz"):
787
+ # don't delete missing textures on usdz files, lots of them are embedded
788
+ missing_textures = None
789
+ else:
790
+ missing_textures = delete_missing_textures()
791
+ metadata["missing_textures"] = missing_textures
792
+
793
+ # possibly apply a random color to all objects
794
+ if object_file.endswith(".stl") or object_file.endswith(".ply"):
795
+ assert len(bpy.context.selected_objects) == 1
796
+ rand_color = apply_single_random_color_to_all_objects()
797
+ metadata["random_color"] = rand_color
798
+ else:
799
+ metadata["random_color"] = None
800
+
801
+ # save metadata
802
+ metadata_path = os.path.join(output_dir, "metadata.json")
803
+ os.makedirs(os.path.dirname(metadata_path), exist_ok=True)
804
+ with open(metadata_path, "w", encoding="utf-8") as f:
805
+ json.dump(metadata, f, sort_keys=True, indent=2)
806
+
807
+ # normalize the scene
808
+ normalize_scene()
809
+
810
+ # randomize the lighting
811
+ randomize_lighting()
812
+
813
+ # render the images
814
+ for i in range(num_renders):
815
+ # set camera
816
+ camera = randomize_camera(
817
+ only_northern_hemisphere=only_northern_hemisphere,
818
+ )
819
+
820
+ # render the image
821
+ render_path = os.path.join(output_dir, f"{i:03d}.png")
822
+ scene.render.filepath = render_path
823
+ bpy.ops.render.render(write_still=True)
824
+
825
+ # save camera RT matrix
826
+ rt_matrix = get_3x4_RT_matrix_from_blender(camera)
827
+ rt_matrix_path = os.path.join(output_dir, f"{i:03d}.npy")
828
+ np.save(rt_matrix_path, rt_matrix)
829
+
830
+
831
+ if __name__ == "__main__":
832
+ parser = argparse.ArgumentParser()
833
+ parser.add_argument(
834
+ "--object_path",
835
+ type=str,
836
+ required=True,
837
+ help="Path to the object file",
838
+ )
839
+ parser.add_argument(
840
+ "--output_dir",
841
+ type=str,
842
+ required=True,
843
+ help="Path to the directory where the rendered images and metadata will be saved.",
844
+ )
845
+ parser.add_argument(
846
+ "--engine",
847
+ type=str,
848
+ default="BLENDER_EEVEE",
849
+ choices=["CYCLES", "BLENDER_EEVEE"],
850
+ )
851
+ parser.add_argument(
852
+ "--only_northern_hemisphere",
853
+ action="store_true",
854
+ help="Only render the northern hemisphere of the object.",
855
+ default=False,
856
+ )
857
+ parser.add_argument(
858
+ "--num_renders",
859
+ type=int,
860
+ default=12,
861
+ help="Number of renders to save of the object.",
862
+ )
863
+ argv = sys.argv[sys.argv.index("--") + 1 :]
864
+ args = parser.parse_args(argv)
865
+
866
+ context = bpy.context
867
+ scene = context.scene
868
+ render = scene.render
869
+
870
+ # Set render settings
871
+ render.engine = args.engine
872
+ render.image_settings.file_format = "PNG"
873
+ render.image_settings.color_mode = "RGBA"
874
+ render.resolution_x = 512
875
+ render.resolution_y = 512
876
+ render.resolution_percentage = 100
877
+
878
+ # Set cycles settings
879
+ scene.cycles.device = "GPU"
880
+ scene.cycles.samples = 128
881
+ scene.cycles.diffuse_bounces = 1
882
+ scene.cycles.glossy_bounces = 1
883
+ scene.cycles.transparent_max_bounces = 3
884
+ scene.cycles.transmission_bounces = 3
885
+ scene.cycles.filter_width = 0.01
886
+ scene.cycles.use_denoising = True
887
+ scene.render.film_transparent = True
888
+ bpy.context.preferences.addons["cycles"].preferences.get_devices()
889
+ bpy.context.preferences.addons[
890
+ "cycles"
891
+ ].preferences.compute_device_type = "CUDA" # or "OPENCL"
892
+
893
+ # Render the images
894
+ render_object(
895
+ object_file=args.object_path,
896
+ num_renders=args.num_renders,
897
+ only_northern_hemisphere=args.only_northern_hemisphere,
898
+ output_dir=args.output_dir,
899
+ )
scripts/rendering/io_scene_usdz.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec07ab6125fe0a021ed08c64169eceda126330401aba3d494d5203d26ac4b093
size 34685
scripts/rendering/main.py ADDED
@@ -0,0 +1,404 @@
1
+ import glob
2
+ import json
3
+ import multiprocessing
4
+ import os
5
+ import platform
6
+ import random
7
+ import subprocess
8
+ import tempfile
9
+ import zipfile
10
+ import time
11
+ from functools import partial
12
+ from typing import List, Literal, Optional, Union
13
+
14
+ import fire
15
+ import fsspec
16
+ import GPUtil
17
+ import pandas as pd
18
+ from loguru import logger
19
+
20
+ from objaverse_xl.github import download_github_objects
21
+ from objaverse_xl.utils import get_uid_from_str
22
+
23
+
24
+ def log_processed_object(object_id: str, sha256: str, csv_filename: str) -> None:
25
+ """Log when an object is done being used.
26
+
27
+ Args:
28
+ object_id (str): ID of the object.
29
+ sha256 (str): SHA256 of the object.
30
+ csv_filename (str): Name of the CSV file to save the logs to.
31
+
32
+ Returns:
33
+ None
34
+ """
35
+ # log that this object was rendered successfully
36
+ # saving locally to avoid excessive writes to the cloud
37
+ dirname = os.path.expanduser("~/.objaverse/github/logs/")
38
+ os.makedirs(dirname, exist_ok=True)
39
+ with open(os.path.join(dirname, csv_filename), "a", encoding="utf-8") as f:
40
+ f.write(f"{time.time()},{object_id},{sha256}\n")
41
+
42
+
43
+ def zipdir(path: str, ziph: zipfile.ZipFile) -> None:
44
+ """Zip up a directory with an arcname structure.
45
+
46
+ Args:
47
+ path (str): Path to the directory to zip.
48
+ ziph (zipfile.ZipFile): ZipFile handler object to write to.
49
+
50
+ Returns:
51
+ None
52
+ """
53
+ # ziph is zipfile handle
54
+ for root, dirs, files in os.walk(path):
55
+ for file in files:
56
+ # this ensures the structure inside the zip starts at folder/
57
+ arcname = os.path.join(os.path.basename(root), file)
58
+ ziph.write(os.path.join(root, file), arcname=arcname)
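A hedged usage sketch for `zipdir`, mirroring how `handle_found_object` below packages a finished render so the archive unpacks to `<save_uid>/...`; the directory path is a hypothetical render output folder:

```python
# Editorial sketch: zip a render directory with the arcname layout zipdir() produces.
import zipfile

target_directory = "/tmp/renders/0123-abcd"  # hypothetical render output dir
with zipfile.ZipFile(f"{target_directory}.zip", "w", zipfile.ZIP_DEFLATED) as zipf:
    zipdir(target_directory, zipf)
```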
59
+
60
+
61
+ def handle_found_object(
62
+ file: str,
63
+ github_url: str,
64
+ sha256: str,
65
+ repo: str,
66
+ organization: str,
67
+ num_renders: int,
68
+ render_dir: str,
69
+ only_northern_hemisphere: bool,
70
+ gpu_devices: Union[int, List[int]],
71
+ render_timeout: int,
72
+ successful_log_file: str = "handle-found-object-successful.csv",
73
+ failed_log_file: str = "handle-found-object-failed.csv",
74
+ ) -> None:
75
+ """Called when an object is successfully found and downloaded.
76
+
77
+ Here, the object has the same sha256 as the one that was downloaded with
78
+ Objaverse-XL. If None, the object will be downloaded, but nothing will be done with
79
+ it.
80
+
81
+ Args:
82
+ file (str): Local path to the downloaded 3D object.
83
+ github_url (str): GitHub URL of the 3D object.
84
+ sha256 (str): SHA256 of the contents of the 3D object.
85
+ repo (str): Name of the GitHub repo where the 3D object comes from.
86
+ organization (str): Name of the GitHub organization where the 3D object
87
+ comes from.
88
+ num_renders (int): Number of renders to save of the object.
89
+ render_dir (str): Directory where the objects will be rendered.
90
+ only_northern_hemisphere (bool): Only render the northern hemisphere of the
91
+ object.
92
+ gpu_devices (Union[int, List[int]]): GPU device(s) to use for rendering. If
93
+ an int, the GPU device will be randomly selected from 0 to gpu_devices - 1.
94
+ If a list, the GPU device will be randomly selected from the list.
95
+ If 0, the CPU will be used for rendering.
96
+ render_timeout (int): Number of seconds to wait for the rendering job to
97
+ complete.
98
+ successful_log_file (str): Name of the log file to save successful renders to.
99
+ failed_log_file (str): Name of the log file to save failed renders to.
100
+
101
+ Returns:
102
+ None
103
+ """
104
+ save_uid = get_uid_from_str(github_url)
105
+ args = f"--object_path '{file}' --num_renders {num_renders}"
106
+
107
+ # get the GPU to use for rendering
108
+ using_gpu: bool = True
109
+ gpu_i = 0
110
+ if isinstance(gpu_devices, int) and gpu_devices > 0:
111
+ num_gpus = gpu_devices
112
+ gpu_i = random.randint(0, num_gpus - 1)
113
+ elif isinstance(gpu_devices, list):
114
+ gpu_i = random.choice(gpu_devices)
115
+ elif isinstance(gpu_devices, int) and gpu_devices == 0:
116
+ using_gpu = False
117
+ else:
118
+ raise ValueError(
119
+ f"gpu_devices must be an int > 0, 0, or a list of ints. Got {gpu_devices}."
120
+ )
121
+
122
+ with tempfile.TemporaryDirectory() as temp_dir:
123
+ # get the target directory for the rendering job
124
+ target_directory = os.path.join(temp_dir, save_uid)
125
+ os.makedirs(target_directory, exist_ok=True)
126
+ args += f" --output_dir {target_directory}"
127
+
128
+ # check for Linux / Ubuntu or MacOS
129
+ if platform.system() == "Linux" and using_gpu:
130
+ args += " --engine BLENDER_EEVEE"
131
+ elif platform.system() == "Darwin" or (
132
+ platform.system() == "Linux" and not using_gpu
133
+ ):
134
+ # As far as I know, MacOS does not support BLENDER_EEVEE, which uses GPU
135
+ # rendering. Generally, I'd only recommend using MacOS for debugging and
136
+ # small rendering jobs, since CYCLES is much slower than BLENDER_EEVEE.
137
+ args += " --engine CYCLES"
138
+ else:
139
+ raise NotImplementedError(f"Platform {platform.system()} is not supported.")
140
+
141
+ # check if we should only render the northern hemisphere
142
+ if only_northern_hemisphere:
143
+ args += " --only_northern_hemisphere"
144
+
145
+ # get the command to run
146
+ command = f"blender-3.2.2-linux-x64/blender --background --python blender_script.py -- {args}"
147
+ if using_gpu:
148
+ command = f"export DISPLAY=:0.{gpu_i} && {command}"
149
+
150
+ # render the object
151
+ subprocess.run(["bash", "-c", command], timeout=render_timeout, check=False)
152
+
153
+ # check that the renders were saved successfully
154
+ png_files = glob.glob(os.path.join(target_directory, "*.png"))
155
+ metadata_files = glob.glob(os.path.join(target_directory, "*.json"))
156
+ npy_files = glob.glob(os.path.join(target_directory, "*.npy"))
157
+ if (
158
+ (len(png_files) != num_renders)
159
+ or (len(npy_files) != num_renders)
160
+ or (len(metadata_files) != 1)
161
+ ):
162
+ logger.error(f"Found object {github_url} was not rendered successfully!")
163
+ log_processed_object(github_url, sha256, failed_log_file)
164
+ return
165
+
166
+ # update the metadata
167
+ with open(f"{target_directory}/metadata.json", "r", encoding="utf-8") as f:
168
+ metadata = json.load(f)
169
+ metadata["sha256"] = sha256
170
+ metadata["file_identifier"] = github_url
171
+ metadata["save_uid"] = save_uid
172
+ with open(f"{target_directory}/metadata.json", "w", encoding="utf-8") as f:
173
+ json.dump(metadata, f, sort_keys=True, indent=2)
174
+
175
+ # Make a zip of the target_directory.
176
+ # Keeps the {save_uid} directory structure when unzipped
177
+ with zipfile.ZipFile(
178
+ f"{target_directory}.zip", "w", zipfile.ZIP_DEFLATED
179
+ ) as zipf:
180
+ zipdir(target_directory, zipf)
181
+
182
+ # get the filesystem for the render_dir
183
+ fs, path = fsspec.core.url_to_fs(render_dir)
184
+
185
+ # move the zip to the render_dir
186
+ fs.makedirs(os.path.join(path, "github", "renders"), exist_ok=True)
187
+ fs.put(
188
+ os.path.join(f"{target_directory}.zip"),
189
+ os.path.join(path, "github", "renders", f"{save_uid}.zip"),
190
+ )
191
+
192
+ # log that this object was rendered successfully
193
+ log_processed_object(github_url, sha256, successful_log_file)
194
+
195
+
196
+ def handle_new_object(
197
+ file: str,
198
+ github_url: str,
199
+ sha256: str,
200
+ repo: str,
201
+ organization: str,
202
+ log_file: str = "handle-new-object.csv",
203
+ ) -> None:
204
+ """Called when a new object is found.
205
+
206
+ Here, the object is not used in Objaverse-XL, but is still downloaded with the
207
+ repository. The object may have not been used because it does not successfully
208
+ import into Blender. If None, the object will be downloaded, but nothing will be
209
+ done with it.
210
+
211
+ Args:
212
+ file (str): Local path to the downloaded 3D object.
213
+ github_url (str): GitHub URL of the 3D object.
214
+ sha256 (str): SHA256 of the contents of the 3D object.
215
+ repo (str): Name of the GitHub repo where the 3D object comes from.
216
+ organization (str): Name of the GitHub organization where the 3D object
217
+ comes from.
218
+ log_file (str): Name of the log file to save the handle_new_object logs to.
219
+
220
+ Returns:
221
+ None
222
+ """
223
+ # log the new object
224
+ log_processed_object(github_url, sha256, log_file)
225
+
226
+
227
+ def handle_modified_object(
228
+ file: str,
229
+ github_url: str,
230
+ sha256: str,
231
+ repo: str,
232
+ organization: str,
233
+ num_renders: int,
234
+ render_dir: str,
235
+ only_northern_hemisphere: bool,
236
+ gpu_devices: Union[int, List[int]],
237
+ render_timeout: int,
238
+ ) -> None:
239
+ """Called when a modified object is found and downloaded.
240
+
241
+ Here, the object is successfully downloaded, but it has a different sha256 than the
242
+ one that was downloaded with Objaverse-XL. This is not expected to happen very
243
+ often, because the same commit hash is used for each repo. If None, the object will
244
+ be downloaded, but nothing will be done with it.
245
+
246
+ Args:
247
+ file (str): Local path to the downloaded 3D object.
248
+ github_url (str): GitHub URL of the 3D object.
249
+ sha256 (str): SHA256 of the contents of the 3D object.
250
+ repo (str): Name of the GitHub repo where the 3D object comes from.
251
+ organization (str): Name of the GitHub organization where the 3D object
252
+ comes from.
253
+ num_renders (int): Number of renders to save of the object.
254
+ render_dir (str): Directory where the objects will be rendered.
255
+ only_northern_hemisphere (bool): Only render the northern hemisphere of the
256
+ object.
257
+ gpu_devices (Union[int, List[int]]): GPU device(s) to use for rendering. If
258
+ an int, the GPU device will be randomly selected from 0 to gpu_devices - 1.
259
+ If a list, the GPU device will be randomly selected from the list.
260
+ If 0, the CPU will be used for rendering.
261
+ render_timeout (int): Number of seconds to wait for the rendering job to
262
+ complete.
263
+
264
+ Returns:
265
+ None
266
+ """
267
+ handle_found_object(
268
+ file=file,
269
+ github_url=github_url,
270
+ sha256=sha256,
271
+ repo=repo,
272
+ organization=organization,
273
+ num_renders=num_renders,
274
+ render_dir=render_dir,
275
+ only_northern_hemisphere=only_northern_hemisphere,
276
+ gpu_devices=gpu_devices,
277
+ render_timeout=render_timeout,
278
+ successful_log_file="handle-modified-object-successful.csv",
279
+ failed_log_file="handle-modified-object-failed.csv",
280
+ )
281
+
282
+
283
+ def handle_missing_object(
284
+ github_url: str,
285
+ sha256: str,
286
+ repo: str,
287
+ organization: str,
288
+ log_file: str = "handle-missing-object.csv",
289
+ ) -> None:
290
+ """Called when an object that is in Objaverse-XL is not found.
291
+
292
+ Here, it is likely that the repository was deleted or renamed. If None, nothing
293
+ will be done with the missing object.
294
+
295
+ Args:
296
+ github_url (str): GitHub URL of the 3D object.
297
+ sha256 (str): SHA256 of the contents of the original 3D object.
298
+ repo (str): Name of the GitHub repo where the 3D object comes from.
299
+ organization (str): Name of the GitHub organization where the 3D object comes from.
300
+ log_file (str): Name of the log file to save missing renders to.
301
+
302
+ Returns:
303
+ None
304
+ """
305
+ # log the missing object
306
+ log_processed_object(github_url, sha256, log_file)
307
+
308
+
309
+ def get_example_objects() -> pd.DataFrame:
310
+ """Returns a DataFrame of example objects to use for debugging."""
311
+ return pd.DataFrame(
312
+ [
313
+ {
314
+ "githubUrl": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx",
315
+ "license": None,
316
+ "sha256": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
317
+ },
318
+ {
319
+ "githubUrl": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb",
320
+ "license": None,
321
+ "sha256": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
322
+ },
323
+ {
324
+ "githubUrl": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj",
325
+ "license": None,
326
+ "sha256": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
327
+ },
328
+ ]
329
+ )
330
+
331
+
332
+ def render_github_objects(
333
+ render_dir: str = "~/.objaverse",
334
+ num_renders: int = 12,
335
+ processes: Optional[int] = None,
336
+ save_repo_format: Optional[Literal["zip", "tar", "tar.gz"]] = None,
337
+ only_northern_hemisphere: bool = False,
338
+ render_timeout: int = 300,
339
+ gpu_devices: Optional[Union[int, List[int]]] = None,
340
+ ) -> None:
341
+ """Renders all GitHub objects in the Objaverse-XL dataset.
342
+
343
+ Args:
344
+ render_dir (str): Directory where the objects will be rendered.
345
+ num_renders (int): Number of renders to save of the object.
346
+ processes (Optional[int]): Number of processes to use for downloading the
347
+ objects. If None, defaults to multiprocessing.cpu_count() * 3.
348
+ save_repo_format (Optional[Literal["zip", "tar", "tar.gz"]]): If not None,
+ the GitHub repo will also be saved to disk in this format; if None, the
+ repo is deleted after rendering each object from it.
350
+ only_northern_hemisphere (bool): Only render the northern hemisphere of the
351
+ object. Useful for rendering objects that are obtained from photogrammetry,
352
+ since the southern hemisphere often has holes.
353
+ render_timeout (int): Number of seconds to wait for the rendering job to
354
+ complete.
355
+ gpu_devices (Optional[Union[int, List[int]]]): GPU device(s) to use for
356
+ rendering. If an int, the GPU device will be randomly selected from 0 to
357
+ gpu_devices - 1. If a list, the GPU device will be randomly selected from
358
+ the list. If 0, the CPU will be used for rendering. If None, defaults to
359
+ use all available GPUs.
360
+
361
+ Returns:
362
+ None
363
+ """
364
+ if platform.system() not in ["Linux", "Darwin"]:
365
+ raise NotImplementedError(
366
+ f"Platform {platform.system()} is not supported. Use Linux or MacOS."
367
+ )
368
+
369
+ # get the gpu devices to use
370
+ parsed_gpu_devices: Union[int, List[int]] = 0
+ if gpu_devices is None:
+ parsed_gpu_devices = len(GPUtil.getGPUs())
+ else:
+ parsed_gpu_devices = gpu_devices
373
+
374
+ if processes is None:
375
+ processes = multiprocessing.cpu_count() * 3
376
+
377
+ objects = get_example_objects()
378
+ download_github_objects(
379
+ objects=objects,
380
+ processes=processes,
381
+ save_repo_format=save_repo_format,
382
+ download_dir=render_dir, # only used when save_repo_format is not None
383
+ handle_found_object=partial(
384
+ handle_found_object,
385
+ render_dir=render_dir,
386
+ num_renders=num_renders,
387
+ only_northern_hemisphere=only_northern_hemisphere,
388
+ gpu_devices=parsed_gpu_devices,
389
+ render_timeout=render_timeout,
390
+ ),
391
+ handle_new_object=handle_new_object,
392
+ handle_modified_object=partial(
393
+ handle_modified_object,
394
+ render_dir=render_dir,
395
+ num_renders=num_renders,
396
+ gpu_devices=parsed_gpu_devices,
397
+ only_northern_hemisphere=only_northern_hemisphere,
398
+ ),
399
+ handle_missing_object=handle_missing_object,
400
+ )
401
+
402
+
403
+ if __name__ == "__main__":
404
+ fire.Fire(render_github_objects)
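Because the entry point is `fire.Fire(render_github_objects)`, every keyword argument doubles as a command-line flag (e.g. `python3 main.py --num_renders 12 --gpu_devices 1`). A hedged sketch of the same call made from Python, assuming it is run from `scripts/rendering` so that `main.py` is importable:

```python
# Editorial sketch: invoke the renderer programmatically instead of via the CLI.
from main import render_github_objects  # assumes cwd == scripts/rendering

render_github_objects(
    render_dir="~/.objaverse",       # zips land under <render_dir>/github/renders/
    num_renders=12,
    only_northern_hemisphere=False,
    render_timeout=300,
    gpu_devices=1,                   # one GPU; 0 forces CPU rendering via CYCLES
)
```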
scripts/rendering/start_x_server.py ADDED
@@ -0,0 +1,272 @@
1
+ #!/usr/bin/env python3
+ # Taken from https://github.com/allenai/ai2thor/blob/main/scripts/ai2thor-xorg
2
+ # Starts an x-server to support running Blender on a headless machine with
3
+ # dedicated NVIDIA GPUs
4
+
5
+ import argparse
6
+
8
+ import os
9
+ import platform
10
+ import re
11
+ import shlex
12
+ import signal
13
+ import subprocess
14
+ import sys
15
+ import time
16
+
17
+ # Turning off automatic black formatting for this script as it breaks quotes.
18
+ # fmt: off
19
+ from typing import List
20
+
21
+ PID_FILE = "/var/run/ai2thor-xorg.pid"
22
+ CONFIG_FILE = "/tmp/ai2thor-xorg.conf"
23
+
24
+ DEFAULT_HEIGHT = 768
25
+ DEFAULT_WIDTH = 1024
26
+
27
+
28
+ def process_alive(pid):
29
+ """
30
+ Use kill(0) to determine if pid is alive
31
+ :param pid: process id
32
+ :rtype: bool
33
+ """
34
+ try:
35
+ os.kill(pid, 0)
36
+ except OSError:
37
+ return False
38
+
39
+ return True
40
+
41
+
42
+ def find_devices(excluded_device_ids):
43
+ devices = []
44
+ id_counter = 0
45
+ for r in pci_records():
46
+ if r.get("Vendor", "") == "NVIDIA Corporation" and r["Class"] in [
47
+ "VGA compatible controller",
48
+ "3D controller",
49
+ ]:
50
+ bus_id = "PCI:" + ":".join(
51
+ map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"]))
52
+ )
53
+
54
+ if id_counter not in excluded_device_ids:
55
+ devices.append(bus_id)
56
+
57
+ id_counter += 1
58
+
59
+ if not devices:
60
+ print("Error: ai2thor-xorg requires at least one NVIDIA device")
61
+ sys.exit(1)
62
+
63
+ return devices
64
+
65
+ def active_display_bus_ids():
66
+ # this determines whether a monitor is connected to the GPU
67
+ # if one is, the following Option is added for the Screen "UseDisplayDevice" "None"
68
+ command = "nvidia-smi --query-gpu=pci.bus_id,display_active --format=csv,noheader"
69
+ active_bus_ids = set()
70
+ result = subprocess.run(command, shell=True, stdout=subprocess.PIPE)
71
+ if result.returncode == 0:
72
+ for line in result.stdout.decode().strip().split("\n"):
73
+ nvidia_bus_id, display_status = re.split(r",\s?", line.strip())
74
+ bus_id = "PCI:" + ":".join(
75
+ map(lambda x: str(int(x, 16)), re.split(r"[:\.]", nvidia_bus_id)[1:])
76
+ )
77
+ if display_status.lower() == "enabled":
78
+ active_bus_ids.add(bus_id)
79
+
80
+ return active_bus_ids
81
+
82
+ def pci_records():
83
+ records = []
84
+ command = shlex.split("lspci -vmm")
85
+ output = subprocess.check_output(command).decode()
86
+
87
+ for devices in output.strip().split("\n\n"):
88
+ record = {}
89
+ records.append(record)
90
+ for row in devices.split("\n"):
91
+ key, value = row.split("\t")
92
+ record[key.split(":")[0]] = value
93
+
94
+ return records
95
+
96
+
97
+ def read_pid():
98
+ if os.path.isfile(PID_FILE):
99
+ with open(PID_FILE) as f:
100
+ return int(f.read())
101
+ else:
102
+ return None
103
+
104
+
105
+ def start(display: str, excluded_device_ids: List[int], width: int, height: int):
106
+ pid = read_pid()
107
+
108
+ if pid and process_alive(pid):
109
+ print("Error: ai2thor-xorg is already running with pid: %s" % pid)
110
+ sys.exit(1)
111
+
112
+ with open(CONFIG_FILE, "w") as f:
113
+ f.write(generate_xorg_conf(excluded_device_ids, width=width, height=height))
114
+
115
+ log_file = "/var/log/ai2thor-xorg.%s.log" % display
116
+ error_log_file = "/var/log/ai2thor-xorg-error.%s.log" % display
117
+ command = shlex.split(
118
+ "Xorg -quiet -maxclients 1024 -noreset +extension GLX +extension RANDR +extension RENDER -logfile %s -config %s :%s"
119
+ % (log_file, CONFIG_FILE, display)
120
+ )
121
+
122
+ pid = None
123
+ with open(error_log_file, "w") as error_log_f:
124
+ proc = subprocess.Popen(command, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=error_log_f)
125
+ pid = proc.pid
126
+ try:
127
+ proc.wait(timeout=0.25)
128
+ except subprocess.TimeoutExpired:
129
+ pass
130
+
131
+ if pid and process_alive(pid):
132
+ with open(PID_FILE, "w") as f:
133
+ f.write(str(proc.pid))
134
+ else:
135
+ print("Error: error with command '%s'" % " ".join(command))
136
+ with open(error_log_file, "r") as f:
137
+ print(f.read())
138
+
139
+
140
+ def print_config(excluded_device_ids: List[int], width: int, height: int):
141
+ print(generate_xorg_conf(excluded_device_ids, width=width, height=height))
142
+
143
+
144
+ def stop():
145
+ pid = read_pid()
146
+ if pid and process_alive(pid):
147
+ os.kill(pid, signal.SIGTERM)
148
+
149
+ for i in range(10):
150
+ time.sleep(0.2)
151
+ if not process_alive(pid):
152
+ os.unlink(PID_FILE)
153
+ break
154
+
155
+
156
+ def generate_xorg_conf(
157
+ excluded_device_ids: List[int], width: int, height: int
158
+ ):
159
+ devices = find_devices(excluded_device_ids)
160
+ active_display_devices = active_display_bus_ids()
161
+
162
+ xorg_conf = []
163
+
164
+ device_section = """
165
+ Section "Device"
166
+ Identifier "Device{device_id}"
167
+ Driver "nvidia"
168
+ VendorName "NVIDIA Corporation"
169
+ BusID "{bus_id}"
170
+ EndSection
171
+ """
172
+ server_layout_section = """
173
+ Section "ServerLayout"
174
+ Identifier "Layout0"
175
+ {screen_records}
176
+ EndSection
177
+ """
178
+ screen_section = """
179
+ Section "Screen"
180
+ Identifier "Screen{screen_id}"
181
+ Device "Device{device_id}"
182
+ DefaultDepth 24
183
+ Option "AllowEmptyInitialConfiguration" "True"
184
+ Option "Interactive" "False"
185
+ {extra_options}
186
+ SubSection "Display"
187
+ Depth 24
188
+ Virtual {width} {height}
189
+ EndSubSection
190
+ EndSection
191
+ """
192
+ screen_records = []
193
+ for i, bus_id in enumerate(devices):
194
+ extra_options = ""
195
+ if bus_id in active_display_devices:
196
+ # See https://github.com/allenai/ai2thor/pull/990
197
+ # when a monitor is connected, this option must be used otherwise
198
+ # Xorg will fail to start
199
+ extra_options = 'Option "UseDisplayDevice" "None"'
200
+ xorg_conf.append(device_section.format(device_id=i, bus_id=bus_id))
201
+ xorg_conf.append(screen_section.format(device_id=i, screen_id=i, width=width, height=height, extra_options=extra_options))
202
+ screen_records.append(
203
+ 'Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=i)
204
+ )
205
+
206
+ xorg_conf.append(
207
+ server_layout_section.format(screen_records="\n ".join(screen_records))
208
+ )
209
+
210
+ output = "\n".join(xorg_conf)
211
+ return output
212
+
213
+
214
+ # fmt: on
215
+
216
+ if __name__ == "__main__":
217
+ if os.geteuid() != 0:
218
+ path = os.path.abspath(__file__)
219
+ print("Executing ai2thor-xorg with sudo")
220
+ args = ["--", path] + sys.argv[1:]
221
+ os.execvp("sudo", args)
222
+
223
+ if platform.system() != "Linux":
224
+ print("Error: Can only run ai2thor-xorg on linux")
225
+ sys.exit(1)
226
+
227
+ parser = argparse.ArgumentParser()
228
+ parser.add_argument(
229
+ "--exclude-device",
230
+ help="exclude a specific GPU device",
231
+ action="append",
232
+ type=int,
233
+ default=[],
234
+ )
235
+ parser.add_argument(
236
+ "--width",
237
+ help="width of the screen to start (should be greater than the maximum"
238
+ f" width of any ai2thor instance you will start) [default: {DEFAULT_WIDTH}]",
239
+ type=int,
240
+ default=DEFAULT_WIDTH,
241
+ )
242
+ parser.add_argument(
243
+ "--height",
244
+ help="height of the screen to start (should be greater than the maximum"
245
+ f" height of any ai2thor instance you will start) [default: {DEFAULT_HEIGHT}]",
246
+ type=int,
247
+ default=DEFAULT_HEIGHT,
248
+ )
249
+ parser.add_argument(
250
+ "command",
251
+ help="command to be executed",
252
+ choices=["start", "stop", "print-config"],
253
+ )
254
+ parser.add_argument(
255
+ "display", help="display to be used", nargs="?", type=int, default=0
256
+ )
257
+ args = parser.parse_args()
258
+ if args.command == "start":
259
+ start(
260
+ display=args.display,
261
+ excluded_device_ids=args.exclude_device,
262
+ height=args.height,
263
+ width=args.width,
264
+ )
265
+ elif args.command == "stop":
266
+ stop()
267
+ elif args.command == "print-config":
268
+ print_config(
269
+ excluded_device_ids=args.exclude_device,
270
+ width=args.width,
271
+ height=args.height,
272
+ )
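For completeness, a hedged sketch of driving this script from Python rather than the shell; it re-execs itself under `sudo` when needed, so this matches the `sudo python3 start_x_server.py start` step in the README. The `print-config`, `start`, and `stop` subcommands and the optional display number come from the argparse definition above:

```python
# Editorial sketch: start/inspect/stop the headless X server from Python.
import subprocess

# Inspect the xorg.conf that would be generated for the detected NVIDIA GPUs.
subprocess.run(["sudo", "python3", "start_x_server.py", "print-config"], check=True)

# Start an X server on display :0; main.py then points Blender at DISPLAY=:0.<gpu index>.
subprocess.run(["sudo", "python3", "start_x_server.py", "start", "0"], check=True)

# Tear it down when rendering is finished.
subprocess.run(["sudo", "python3", "start_x_server.py", "stop"], check=True)
```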
tests/test_api.py CHANGED
@@ -7,10 +7,6 @@ import pytest
 
 from objaverse_xl.github import _process_repo, download_github_objects
 
-# @pytest.mark.parametrize("message", ["hello", "world"])
-# def test_hello_world(message):
-#     hello_world(message)
-
 
 def test_github_process_repo():
     download_dir = "~/.objaverse-tests"