mattdeitke committed
Commit 5c02574
1 Parent(s): 96eeaba

refactor so source files are separate from api files

.github/workflows/ci.yaml DELETED
@@ -1,51 +0,0 @@
- name: Continuous integration
-
- on:
-   push:
-     branches:
-       - main
-   pull_request:
-     branches:
-       - main
-
- jobs:
-   lint:
-     runs-on: ubuntu-latest
-     steps:
-       - uses: actions/checkout@v2
-       - name: Set up Python 3.8
-         uses: actions/setup-python@v2
-         with:
-           python-version: 3.8
-       - name: Install
-         run: |
-           python3 -m venv .env
-           source .env/bin/activate
-           python -m pip install -U pip
-           make install-dev
-       - name: Lint
-         run: |
-           source .env/bin/activate
-           make lint
-   tests:
-     runs-on: ubuntu-latest
-     strategy:
-       matrix:
-         python-version: [3.8]
-
-     steps:
-       - uses: actions/checkout@v2
-       - name: Set up Python ${{ matrix.python-version }}
-         uses: actions/setup-python@v2
-         with:
-           python-version: ${{ matrix.python-version }}
-       - name: Install
-         run: |
-           python3 -m venv .env
-           source .env/bin/activate
-           make install
-           make install-dev
-       - name: Unit tests
-         run: |
-           source .env/bin/activate
-           make test
 
.isort.cfg DELETED
@@ -1,3 +0,0 @@
- [settings]
- profile = black
- known_third_party = pandas,numpy # Modify with your actual third party libraries
 
.pylintrc DELETED
@@ -1,331 +0,0 @@
- [MASTER]
-
- # Specify a configuration file.
- #rcfile=
-
- # Python code to execute, usually for sys.path manipulation such as
- # pygtk.require().
- #init-hook=
-
- # Profiled execution.
- profile=no
-
- # Add files or directories to the blacklist. They should be base names, not
- # paths.
- ignore=CVS
-
- # Pickle collected data for later comparisons.
- persistent=yes
-
- # List of plugins (as comma separated values of python modules names) to load,
- # usually to register additional checkers.
- load-plugins=
-
-
- [MESSAGES CONTROL]
-
- # Enable the message, report, category or checker with the given id(s). You can
- # either give multiple identifier separated by comma (,) or put this option
- # multiple time. See also the "--disable" option for examples.
- enable=indexing-exception,old-raise-syntax
-
- # Disable the message, report, category or checker with the given id(s). You
- # can either give multiple identifiers separated by comma (,) or put this
- # option multiple times (only on the command line, not in the configuration
- # file where it should appear only once).You can also use "--disable=all" to
- # disable everything first and then reenable specific checks. For example, if
- # you want to run only the similarities checker, you can use "--disable=all
- # --enable=similarities". If you want to run only the classes checker, but have
- # no Warning level messages displayed, use"--disable=all --enable=classes
- # --disable=W"
- disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,no-else-return,wrong-import-order,unnecessary-pass,logging-fstring-interpolation,logging-format-interpolation,C0330
-
-
- # Set the cache size for astng objects.
- cache-size=500
-
-
- [REPORTS]
-
- # Set the output format. Available formats are text, parseable, colorized, msvs
- # (visual studio) and html. You can also give a reporter class, eg
- # mypackage.mymodule.MyReporterClass.
- output-format=text
-
- # Put messages in a separate file for each module / package specified on the
- # command line instead of printing them on stdout. Reports (if any) will be
- # written in a file name "pylint_global.[txt|html]".
- files-output=no
-
- # Tells whether to display a full report or only the messages
- reports=no
-
- # Python expression which should return a note less than 10 (10 is the highest
- # note). You have access to the variables errors warning, statement which
- # respectively contain the number of errors / warnings messages and the total
- # number of statements analyzed. This is used by the global evaluation report
- # (RP0004).
- evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
- # Add a comment according to your evaluation note. This is used by the global
- # evaluation report (RP0004).
- comment=no
-
- # Template used to display messages. This is a python new-style format string
- # used to format the message information. See doc for all details
- #msg-template=
-
-
- [TYPECHECK]
-
- # Tells whether missing members accessed in mixin class should be ignored. A
- # mixin class is detected if its name ends with "mixin" (case insensitive).
- ignore-mixin-members=yes
-
- # List of classes names for which member attributes should not be checked
- # (useful for classes with attributes dynamically set).
- ignored-classes=SQLObject
-
- # When zope mode is activated, add a predefined set of Zope acquired attributes
- # to generated-members.
- zope=no
-
- # List of members which are set dynamically and missed by pylint inference
- # system, and so shouldn't trigger E0201 when accessed. Python regular
- # expressions are accepted.
- generated-members=REQUEST,acl_users,aq_parent
-
- # List of decorators that create context managers from functions, such as
- # contextlib.contextmanager.
- contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
-
-
- [VARIABLES]
-
- # Tells whether we should check for unused import in __init__ files.
- init-import=no
-
- # A regular expression matching the beginning of the name of dummy variables
- # (i.e. not used).
- dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
-
- # List of additional names supposed to be defined in builtins. Remember that
- # you should avoid to define new builtins when possible.
- additional-builtins=
-
-
- [BASIC]
-
- # Required attributes for module, separated by a comma
- required-attributes=
-
- # List of builtins function names that should not be used, separated by a comma
- bad-functions=apply,input,reduce
-
-
- # Disable the report(s) with the given id(s).
- # All non-Google reports are disabled by default.
- disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923
-
- # Regular expression which should only match correct module names
- module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-
- # Regular expression which should only match correct module level names
- const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
-
- # Regular expression which should only match correct class names
- class-rgx=^_?[A-Z][a-zA-Z0-9]*$
-
- # Regular expression which should only match correct function names
- function-rgx=^(?:(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
-
- # Regular expression which should only match correct method names
- method-rgx=^(?:(?P<exempt>__[a-z0-9_]+__|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
-
- # Regular expression which should only match correct instance attribute names
- attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
-
- # Regular expression which should only match correct argument names
- argument-rgx=^[a-z][a-z0-9_]*$
-
- # Regular expression which should only match correct variable names
- variable-rgx=^[a-z][a-z0-9_]*$
-
- # Regular expression which should only match correct attribute names in class
- # bodies
- class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
-
- # Regular expression which should only match correct list comprehension /
- # generator expression variable names
- inlinevar-rgx=^[a-z][a-z0-9_]*$
-
- # Good variable names which should always be accepted, separated by a comma
- good-names=main,_
-
- # Bad variable names which should always be refused, separated by a comma
- bad-names=
-
- # Regular expression which should only match function or class names that do
- # not require a docstring.
- no-docstring-rgx=(__.*__|main)
-
- # Minimum line length for functions/classes that require docstrings, shorter
- # ones are exempt.
- docstring-min-length=10
-
-
- [FORMAT]
-
- # Maximum number of characters on a single line.
- max-line-length=120
-
- # Regexp for a line that is allowed to be longer than the limit.
- ignore-long-lines=(?x)
-   (^\s*(import|from)\s
-   |\$Id:\s\/\/depot\/.+#\d+\s\$
-   |^[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*("[^"]\S+"|'[^']\S+')
-   |^\s*\#\ LINT\.ThenChange
-   |^[^#]*\#\ type:\ [a-zA-Z_][a-zA-Z0-9_.,[\] ]*$
-   |pylint
-   |"""
-   |\#
-   |lambda
-   |(https?|ftp):)
-
- # Allow the body of an if to be on the same line as the test if there is no
- # else.
- single-line-if-stmt=y
-
- # List of optional constructs for which whitespace checking is disabled
- no-space-check=
-
- # Maximum number of lines in a module
- max-module-lines=99999
-
- # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
- # tab).
- indent-string='    '
-
-
- [SIMILARITIES]
-
- # Minimum lines number of a similarity.
- min-similarity-lines=4
-
- # Ignore comments when computing similarities.
- ignore-comments=yes
-
- # Ignore docstrings when computing similarities.
- ignore-docstrings=yes
-
- # Ignore imports when computing similarities.
- ignore-imports=no
-
-
- [MISCELLANEOUS]
-
- # List of note tags to take in consideration, separated by a comma.
- notes=
-
-
- [IMPORTS]
-
- # Deprecated modules which should not be used, separated by a comma
- deprecated-modules=regsub,TERMIOS,Bastion,rexec,sets
-
- # Create a graph of every (i.e. internal and external) dependencies in the
- # given file (report RP0402 must not be disabled)
- import-graph=
-
- # Create a graph of external dependencies in the given file (report RP0402 must
- # not be disabled)
- ext-import-graph=
-
- # Create a graph of internal dependencies in the given file (report RP0402 must
- # not be disabled)
- int-import-graph=
-
- extension-pkg-whitelist=_jsonnet
-
-
- [CLASSES]
-
- # List of interface methods to ignore, separated by a comma. This is used for
- # instance to not check methods defines in Zope's Interface base class.
- ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
- # List of method names used to declare (i.e. assign) instance attributes.
- defining-attr-methods=__init__,__new__,setUp
-
- # List of valid names for the first argument in a class method.
- valid-classmethod-first-arg=cls,class_
-
- # List of valid names for the first argument in a metaclass class method.
- valid-metaclass-classmethod-first-arg=mcs
-
-
- [DESIGN]
-
- # Maximum number of arguments for function / method
- max-args=5
-
- # Argument names that match this expression will be ignored. Default to name
- # with leading underscore
- ignored-argument-names=_.*
-
- # Maximum number of locals for function / method body
- max-locals=15
-
- # Maximum number of return / yield for function / method body
- max-returns=6
-
- # Maximum number of branch for function / method body
- max-branches=12
-
- # Maximum number of statements in function / method body
- max-statements=50
-
- # Maximum number of parents for a class (see R0901).
- max-parents=7
-
- # Maximum number of attributes for a class (see R0902).
- max-attributes=7
-
- # Minimum number of public methods for a class (see R0903).
- min-public-methods=2
-
- # Maximum number of public methods for a class (see R0904).
- max-public-methods=20
-
-
- [EXCEPTIONS]
-
- # Exceptions that will emit a warning when being caught. Defaults to
- # "Exception"
- overgeneral-exceptions=Exception,StandardError,BaseException
-
-
- [AST]
-
- # Maximum line length for lambdas
- short-func-length=1
-
- # List of module members that should be marked as deprecated.
- # All of the string functions are listed in 4.1.4 Deprecated string functions
- # in the Python 2.4 docs.
- deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc
-
-
- [DOCSTRING]
-
- # List of exceptions that do not need to be mentioned in the Raises section of
- # a docstring.
- ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError
-
-
-
- [TOKENS]
-
- # Number of spaces of indent required when the last token on the preceding line
- # is an open (, [, or {.
- indent-after-paren=4
 
Makefile DELETED
@@ -1,28 +0,0 @@
- install: ## [Local development] Upgrade pip, install requirements, install package.
- 	python -m pip install -U pip
- 	python -m pip install -e .
-
- install-dev: ## [Local development] Install test requirements
- 	python -m pip install -r requirements-test.txt
-
- lint: ## [Local development] Run mypy, pylint and black
- 	python -m mypy objaverse_xl
- 	python -m pylint objaverse_xl
- 	python -m black --check objaverse_xl
- 	python -m isort --check-only objaverse_xl
- 	python -m black --check scripts --exclude scripts/rendering/blender-3.2.2-linux-x64/
- 	python -m isort --check-only scripts/**/*.py --skip scripts/rendering/blender-3.2.2-linux-x64/
-
- format: ## [Local development] Auto-format python code using black, don't include blender
- 	python -m isort objaverse_xl
- 	python -m black objaverse_xl
- 	python -m isort scripts/**/*.py --skip scripts/rendering/blender-3.2.2-linux-x64/
- 	python -m black scripts --exclude scripts/rendering/blender-3.2.2-linux-x64/
-
- test: ## [Local development] Run unit tests
- 	JUPYTER_PLATFORM_DIRS=1 python -m pytest -x -s -v tests
-
- .PHONY: help
-
- help: # Run `make help` to get help on the make commands
- 	@grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
 
github/{github-urls.parquet → github.parquet} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ac5dba4285a7d4e233f6cff0f538d7e9f6f7410bcbd6e47d565ce0338de5ee75
- size 797464991
+ oid sha256:f14bf4daceac97072eee5721489ad160089b9fc1514e6a9e096574c3e4276da6
+ size 797466051
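
The Git LFS pointer tracked here stores only the blob's SHA256 ("oid") and byte size, which is why the parquet update shows up as a two-line change. A minimal sketch for checking a downloaded copy against the new pointer (the local filename is an assumption):

    import hashlib

    def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
        # Stream in chunks so the ~800 MB parquet never has to fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # oid from the new pointer above
    expected = "f14bf4daceac97072eee5721489ad160089b9fc1514e6a9e096574c3e4276da6"
    assert sha256_of_file("github.parquet") == expected
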
mypy.ini DELETED
@@ -1,5 +0,0 @@
- # Global options:
-
- [mypy]
- python_version = 3.8
- ignore_missing_imports = True
 
objaverse_xl/__init__.py DELETED
@@ -1 +0,0 @@
- """A package for downloading and processing Objaverse-XL."""
 
objaverse_xl/abstract.py DELETED
@@ -1,97 +0,0 @@
- """Abstract class for Objaverse-XL sources."""
-
- from abc import ABC, abstractmethod
- from typing import Callable, Dict, Optional
-
- import pandas as pd
-
-
- class ObjaverseSource(ABC):
-     """Abstract class for Objaverse-XL sources."""
-
-     @abstractmethod
-     def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
-         """Loads the 3D object metadata as a Pandas DataFrame.
-
-         Args:
-             download_dir (str, optional): Directory to download the parquet metadata
-                 file. Supports all file systems supported by fsspec. Defaults to
-                 "~/.objaverse".
-
-         Returns:
-             pd.DataFrame: Metadata of the 3D objects as a Pandas DataFrame with columns
-                 for the object "fileIdentifier", "license", "source", "fileType",
-                 "sha256", and "metadata".
-         """
-
-     @abstractmethod
-     def download_objects(
-         self,
-         objects: pd.DataFrame,
-         download_dir: str = "~/.objaverse",
-         processes: Optional[int] = None,
-         handle_found_object: Optional[Callable] = None,
-         handle_modified_object: Optional[Callable] = None,
-         handle_missing_object: Optional[Callable] = None,
-         **kwargs
-     ) -> Dict[str, str]:
-         """Downloads all objects from the source.
-
-         Args:
-             objects (pd.DataFrame): Objects to download. Must have columns for
-                 the object "fileIdentifier" and "sha256". Use the `get_annotations`
-                 function to get the metadata.
-             processes (Optional[int], optional): Number of processes to use for
-                 downloading. If None, will use the number of CPUs on the machine.
-                 Defaults to None.
-             download_dir (str, optional): Directory to download the objects to.
-                 Supports all file systems supported by fsspec. Defaults to
-                 "~/.objaverse".
-             save_repo_format (Optional[Literal["zip", "tar", "tar.gz", "files"]],
-                 optional): Format to save the repository. If None, the repository will
-                 not be saved. If "files" is specified, each file will be saved
-                 individually. Otherwise, the repository can be saved as a "zip", "tar",
-                 or "tar.gz" file. Defaults to None.
-             handle_found_object (Optional[Callable], optional): Called when an object is
-                 successfully found and downloaded. Here, the object has the same sha256
-                 as the one that was downloaded with Objaverse-XL. If None, the object
-                 will be downloaded, but nothing will be done with it. Args for the
-                 function include:
-                 - local_path (str): Local path to the downloaded 3D object.
-                 - file_identifier (str): File identifier of the 3D object.
-                 - sha256 (str): SHA256 of the contents of the 3D object.
-                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object,
-                   including the GitHub organization and repo names.
-                 Return is not used. Defaults to None.
-             handle_modified_object (Optional[Callable], optional): Called when a
-                 modified object is found and downloaded. Here, the object is
-                 successfully downloaded, but it has a different sha256 than the one that
-                 was downloaded with Objaverse-XL. This is not expected to happen very
-                 often, because the same commit hash is used for each repo. If None, the
-                 object will be downloaded, but nothing will be done with it. Args for
-                 the function include:
-                 - local_path (str): Local path to the downloaded 3D object.
-                 - file_identifier (str): File identifier of the 3D object.
-                 - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
-                   object.
-                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
-                   it was when it was downloaded with Objaverse-XL.
-                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
-                   particular to the source.
-                 Return is not used. Defaults to None.
-             handle_missing_object (Optional[Callable], optional): Called when an object
-                 that is in Objaverse-XL is not found. Here, it is likely that the
-                 repository was deleted or renamed. If None, nothing will be done with
-                 the missing object.
-                 Args for the function include:
-                 - file_identifier (str): File identifier of the 3D object.
-                 - sha256 (str): SHA256 of the contents of the original 3D object.
-                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
-                   particular to the source.
-                 Return is not used. Defaults to None.
-
-         Returns:
-             Dict[str, str]: Mapping of file identifiers to local paths of the downloaded
-                 3D objects.
-         """
-         pass
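
The abstract interface above is easiest to read next to a concrete implementation. A minimal sketch of a conforming subclass (the InMemorySource name and its toy data are illustrative only, not part of this commit; the import path is the pre-refactor one from this diff):

    from typing import Callable, Dict, Optional

    import pandas as pd

    from objaverse_xl.abstract import ObjaverseSource


    class InMemorySource(ObjaverseSource):
        """Toy source that serves a fixed annotation table and downloads nothing."""

        def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
            # Must expose the standard columns described in the abstract docstring.
            return pd.DataFrame(
                [
                    {
                        "fileIdentifier": "https://example.com/object-1.glb",
                        "license": "CC-BY",
                        "source": "in-memory",
                        "fileType": "glb",
                        "sha256": "0" * 64,
                        "metadata": "{}",
                    }
                ]
            )

        def download_objects(
            self,
            objects: pd.DataFrame,
            download_dir: str = "~/.objaverse",
            processes: Optional[int] = None,
            handle_found_object: Optional[Callable] = None,
            handle_modified_object: Optional[Callable] = None,
            handle_missing_object: Optional[Callable] = None,
            **kwargs,
        ) -> Dict[str, str]:
            # A real source would fetch each file, hash it, and dispatch to the
            # matching callback; this toy treats every requested object as missing.
            if handle_missing_object is not None:
                for _, row in objects.iterrows():
                    handle_missing_object(
                        file_identifier=row["fileIdentifier"],
                        sha256=row["sha256"],
                        metadata={},
                    )
            return {}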
 
objaverse_xl/github.py DELETED
@@ -1,597 +0,0 @@
- """Script to download objects from GitHub."""
-
- import json
- import multiprocessing
- import os
- import shutil
- import subprocess
- import tarfile
- import tempfile
- from multiprocessing import Pool
- from typing import Callable, Dict, List, Literal, Optional
-
- import fsspec
- import pandas as pd
- import requests
- from loguru import logger
- from tqdm import tqdm
-
- from objaverse_xl.abstract import ObjaverseSource
- from objaverse_xl.utils import get_file_hash
-
- FILE_EXTENSIONS = [
-     ".obj",
-     ".glb",
-     ".gltf",
-     ".usdz",
-     ".usd",
-     ".fbx",
-     ".stl",
-     ".usda",
-     ".dae",
-     ".ply",
-     ".abc",
-     ".blend",
- ]
-
-
- class GitHubDownloader(ObjaverseSource):
-     """Script to download objects from GitHub."""
-
-     def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
-         """Loads the GitHub 3D object metadata as a Pandas DataFrame.
-
-         Args:
-             download_dir (str, optional): Directory to download the parquet metadata
-                 file. Supports all file systems supported by fsspec. Defaults to
-                 "~/.objaverse".
-
-         Returns:
-             pd.DataFrame: GitHub 3D object metadata as a Pandas DataFrame with columns
-                 for the object "fileIdentifier", "license", "source", "fileType",
-                 "sha256", and "metadata".
-         """
-         filename = os.path.join(download_dir, "github", "github-urls.parquet")
-         fs, path = fsspec.core.url_to_fs(filename)
-         fs.makedirs(os.path.dirname(path), exist_ok=True)
-
-         # download the parquet file if it doesn't exist
-         if not fs.exists(path):
-             url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/github/github-urls.parquet"
-
-             response = requests.get(url)
-             response.raise_for_status()
-             with fs.open(path, "wb") as file:
-                 file.write(response.content)
-
-         # load the parquet file with fsspec
-         with fs.open(path) as f:
-             df = pd.read_parquet(f)
-
-         df["metadata"] = "{}"
-
-         return df
-
-     def _get_repo_id_with_hash(self, item: pd.Series) -> str:
-         org, repo = item["fileIdentifier"].split("/")[3:5]
-         commit_hash = item["fileIdentifier"].split("/")[6]
-         return f"{org}/{repo}/{commit_hash}"
-
-     def _git_shallow_clone(self, repo_url: str, target_directory: str) -> bool:
-         """Helper function to shallow clone a repo with git.
-
-         Args:
-             repo_url (str): URL of the repo to clone.
-             target_directory (str): Directory to clone the repo to.
-
-         Returns:
-             bool: True if the clone was successful, False otherwise.
-         """
-         return self._run_command_with_check(
-             ["git", "clone", "--depth", "1", repo_url, target_directory],
-         )
-
-     def _run_command_with_check(
-         self, command: List[str], cwd: Optional[str] = None
-     ) -> bool:
-         """Helper function to run a command and check if it was successful.
-
-         Args:
-             command (List[str]): Command to run.
-             cwd (Optional[str], optional): Current working directory to run the command
-                 in. Defaults to None.
-
-         Returns:
-             bool: True if the command was successful, False otherwise.
-         """
-         try:
-             subprocess.run(
-                 command,
-                 cwd=cwd,
-                 check=True,
-                 stdout=subprocess.DEVNULL,
-                 stderr=subprocess.DEVNULL,
-             )
-             return True
-         except subprocess.CalledProcessError as e:
-             logger.error(f"Error: {e}")
-             logger.error(e.stdout)
-             logger.error(e.stderr)
-             return False
-
-     def _process_repo(
-         self,
-         repo_id: str,
-         fs: fsspec.AbstractFileSystem,
-         base_dir: str,
-         save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]],
-         expected_objects: Dict[str, str],
-         handle_found_object: Optional[Callable],
-         handle_modified_object: Optional[Callable],
-         handle_missing_object: Optional[Callable],
-         handle_new_object: Optional[Callable],
-         commit_hash: Optional[str],
-     ) -> Dict[str, str]:
-         """Process a single repo.
-
-         Args:
-             repo_id (str): GitHub repo ID in the format of organization/repo.
-             fs (fsspec.AbstractFileSystem): File system to use for saving the repo.
-             base_dir (str): Base directory to save the repo to.
-             expected_objects (Dict[str, str]): Dictionary of objects that one expects to
-                 find in the repo. Keys are the "fileIdentifier" (i.e., the GitHub URL in
-                 this case) and values are the "sha256" of the objects.
-             {and the rest of the args are the same as download_objects}
-
-         Returns:
-             Dict[str, str]: A dictionary that maps from the "fileIdentifier" to the path
-                 of the downloaded object.
-         """
-         # NOTE: assuming that the user has already checked that the repo doesn't exist
-         org, repo = repo_id.split("/")
-
-         out = {}
-         with tempfile.TemporaryDirectory() as temp_dir:
-             # clone the repo to a temp directory
-             target_directory = os.path.join(temp_dir, repo)
-             successful_clone = self._git_shallow_clone(
-                 f"https://github.com/{org}/{repo}.git", target_directory
-             )
-             if not successful_clone:
-                 logger.error(f"Could not clone {repo_id}")
-                 if handle_missing_object is not None:
-                     for github_url, sha256 in expected_objects.items():
-                         handle_missing_object(
-                             file_identifier=github_url,
-                             sha256=sha256,
-                             metadata=dict(github_organization=org, github_repo=repo),
-                         )
-                 return {}
-
-             # use the commit hash if specified
-             repo_commit_hash = self._get_commit_hash_from_local_git_dir(
-                 target_directory
-             )
-             if commit_hash is not None:
-                 keep_going = True
-                 if repo_commit_hash != commit_hash:
-                     # run git fetch && git reset --hard && git checkout <commit_hash>
-                     if not self._run_command_with_check(
-                         ["git", "fetch", "origin", commit_hash], target_directory
-                     ):
-                         logger.error(
-                             f"Error in git fetch! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
-                         )
-                         keep_going = False
-
-                     if keep_going and not self._run_command_with_check(
-                         ["git", "reset", "--hard"], target_directory
-                     ):
-                         logger.error(
-                             f"Error in git reset! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
-                         )
-                         keep_going = False
-
-                     if keep_going:
-                         if self._run_command_with_check(
-                             ["git", "checkout", commit_hash], target_directory
-                         ):
-                             repo_commit_hash = commit_hash
-                         else:
-                             logger.error(
-                                 f"Error in git checkout! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
-                             )
-
-             # pull the lfs files
-             self._pull_lfs_files(target_directory)
-
-             # get all the files in the repo
-             files = self._list_files(target_directory)
-             files_with_3d_extension = [
-                 file
-                 for file in files
-                 if any(file.lower().endswith(ext) for ext in FILE_EXTENSIONS)
-             ]
-
-             # get the sha256 for each file
-             file_hashes = []
-             for file in tqdm(files_with_3d_extension, desc="Handling 3D object files"):
-                 file_hash = get_file_hash(file)
-                 # remove the temp_dir from the file path
-                 github_url = file.replace(
-                     target_directory,
-                     f"https://github.com/{org}/{repo}/blob/{repo_commit_hash}",
-                 )
-                 file_hashes.append(dict(sha256=file_hash, fileIdentifier=github_url))
-
-                 # handle the object under different conditions
-                 if github_url in expected_objects:
-                     out[github_url] = file[len(target_directory) + 1 :]
-                     if expected_objects[github_url] == file_hash:
-                         if handle_found_object is not None:
-                             handle_found_object(
-                                 local_path=file,
-                                 file_identifier=github_url,
-                                 sha256=file_hash,
-                                 metadata=dict(
-                                     github_organization=org, github_repo=repo
-                                 ),
-                             )
-                     else:
-                         if handle_modified_object is not None:
-                             handle_modified_object(
-                                 local_path=file,
-                                 file_identifier=github_url,
-                                 new_sha256=file_hash,
-                                 old_sha256=expected_objects[github_url],
-                                 metadata=dict(
-                                     github_organization=org, github_repo=repo
-                                 ),
-                             )
-                 elif handle_new_object is not None:
-                     handle_new_object(
-                         local_path=file,
-                         file_identifier=github_url,
-                         sha256=file_hash,
-                         metadata=dict(github_organization=org, github_repo=repo),
-                     )
-
-             # save the file hashes to a json file
-             with open(
-                 os.path.join(target_directory, ".objaverse-file-hashes.json"),
-                 "w",
-                 encoding="utf-8",
-             ) as f:
-                 json.dump(file_hashes, f, indent=2)
-
-             # remove the .git directory
-             shutil.rmtree(os.path.join(target_directory, ".git"))
-
-             if save_repo_format is None:
-                 # remove the paths, since it's not downloaded
-                 out = {}
-             else:
-                 logger.debug(f"Saving {org}/{repo} as {save_repo_format}")
-                 # save the repo to a zip file
-                 if save_repo_format == "zip":
-                     shutil.make_archive(target_directory, "zip", target_directory)
-                 elif save_repo_format == "tar":
-                     with tarfile.open(
-                         os.path.join(temp_dir, f"{repo}.tar"), "w"
-                     ) as tar:
-                         tar.add(target_directory, arcname=repo)
-                 elif save_repo_format == "tar.gz":
-                     with tarfile.open(
-                         os.path.join(temp_dir, f"{repo}.tar.gz"), "w:gz"
-                     ) as tar:
-                         tar.add(target_directory, arcname=repo)
-                 elif save_repo_format == "files":
-                     pass
-                 else:
-                     raise ValueError(
-                         f"save_repo_format must be one of zip, tar, tar.gz, files. Got {save_repo_format}"
-                     )
-
-                 dirname = os.path.join(base_dir, "repos", org)
-                 fs.makedirs(dirname, exist_ok=True)
-                 if save_repo_format != "files":
-                     # move the repo to the correct location (with put)
-                     fs.put(
-                         os.path.join(temp_dir, f"{repo}.{save_repo_format}"),
-                         os.path.join(dirname, f"{repo}.{save_repo_format}"),
-                     )
-
-                     for file_identifier in out.copy():
-                         out[file_identifier] = os.path.join(
-                             dirname, f"{repo}.{save_repo_format}", out[file_identifier]
-                         )
-                 else:
-                     # move the repo to the correct location (with put)
-                     fs.put(target_directory, dirname, recursive=True)
-
-                     for file_identifier in out.copy():
-                         out[file_identifier] = os.path.join(
-                             dirname, repo, out[file_identifier]
-                         )
-
-         # get each object that was missing from the expected objects
-         if handle_missing_object is not None:
-             obtained_urls = {x["fileIdentifier"] for x in file_hashes}
-             for github_url, sha256 in expected_objects.items():
-                 if github_url not in obtained_urls:
-                     handle_missing_object(
-                         file_identifier=github_url,
-                         sha256=sha256,
-                         metadata=dict(github_organization=org, github_repo=repo),
-                     )
-
-         return out
-
-     def _list_files(self, root_dir: str) -> List[str]:
-         return [
-             os.path.join(root, f)
-             for root, dirs, files in os.walk(root_dir)
-             for f in files
-         ]
-
-     def _pull_lfs_files(self, repo_dir: str) -> None:
-         if self._has_lfs_files(repo_dir):
-             subprocess.run(["git", "lfs", "pull"], cwd=repo_dir, check=True)
-
-     def _has_lfs_files(self, repo_dir: str) -> bool:
-         gitattributes_path = os.path.join(repo_dir, ".gitattributes")
-         if not os.path.exists(gitattributes_path):
-             return False
-         with open(gitattributes_path, "r", encoding="utf-8") as f:
-             for line in f:
-                 if "filter=lfs" in line:
-                     return True
-         return False
-
-     def _get_commit_hash_from_local_git_dir(self, local_git_dir: str) -> str:
-         # get the git hash of the repo
-         result = subprocess.run(
-             ["git", "rev-parse", "HEAD"],
-             cwd=local_git_dir,
-             capture_output=True,
-             check=True,
-         )
-         commit_hash = result.stdout.strip().decode("utf-8")
-         return commit_hash
-
-     def _parallel_process_repo(self, args) -> Dict[str, str]:
-         """Helper function to process a repo in parallel.
-
-         Note: This function is used to parallelize the processing of repos. It is not
-         intended to be called directly.
-
-         Args:
-             args (Tuple): Tuple of arguments to pass to _process_repo.
-
-         Returns:
-             Dict[str, str]: A dictionary that maps from the "fileIdentifier" to the path
-                 of the downloaded object.
-         """
-         (
-             repo_id_hash,
-             fs,
-             base_dir,
-             save_repo_format,
-             expected_objects,
-             handle_found_object,
-             handle_modified_object,
-             handle_missing_object,
-             handle_new_object,
-         ) = args
-         repo_id = "/".join(repo_id_hash.split("/")[:2])
-         commit_hash = repo_id_hash.split("/")[2]
-         return self._process_repo(
-             repo_id=repo_id,
-             fs=fs,
-             base_dir=base_dir,
-             save_repo_format=save_repo_format,
-             expected_objects=expected_objects,
-             handle_found_object=handle_found_object,
-             handle_modified_object=handle_modified_object,
-             handle_missing_object=handle_missing_object,
-             handle_new_object=handle_new_object,
-             commit_hash=commit_hash,
-         )
-
-     def _process_group(self, group):
-         key, group_df = group
-         return key, group_df.set_index("fileIdentifier")["sha256"].to_dict()
-
-     def download_objects(
-         self,
-         objects: pd.DataFrame,
-         download_dir: Optional[str] = "~/.objaverse",
-         processes: Optional[int] = None,
-         handle_found_object: Optional[Callable] = None,
-         handle_modified_object: Optional[Callable] = None,
-         handle_missing_object: Optional[Callable] = None,
-         **kwargs,
-     ) -> Dict[str, str]:
-         """Download the specified GitHub objects.
-
-         Args:
-             objects (pd.DataFrame): GitHub objects to download. Must have columns for
-                 the object "fileIdentifier" and "sha256". Use the `get_annotations`
-                 function to get the metadata.
-             download_dir (Optional[str], optional): Directory to download the GitHub
-                 objects to. Supports all file systems supported by fsspec. If None, the
-                 repository will not be saved (note that save_repo_format must also be
-                 None in this case, otherwise a ValueError is raised). Defaults to
-                 "~/.objaverse".
-             processes (Optional[int], optional): Number of processes to use for
-                 downloading. If None, will use the number of CPUs on the machine.
-                 Defaults to None.
-             handle_found_object (Optional[Callable], optional): Called when an object is
-                 successfully found and downloaded. Here, the object has the same sha256
-                 as the one that was downloaded with Objaverse-XL. If None, the object
-                 will be downloaded, but nothing will be done with it. Args for the
-                 function include:
-                 - local_path (str): Local path to the downloaded 3D object.
-                 - file_identifier (str): GitHub URL of the 3D object.
-                 - sha256 (str): SHA256 of the contents of the 3D object.
-                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-                   GitHub organization and repo names.
-                 Return is not used. Defaults to None.
-             handle_modified_object (Optional[Callable], optional): Called when a
-                 modified object is found and downloaded. Here, the object is
-                 successfully downloaded, but it has a different sha256 than the one that
-                 was downloaded with Objaverse-XL. This is not expected to happen very
-                 often, because the same commit hash is used for each repo. If None, the
-                 object will be downloaded, but nothing will be done with it. Args for
-                 the function include:
-                 - local_path (str): Local path to the downloaded 3D object.
-                 - file_identifier (str): GitHub URL of the 3D object.
-                 - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
-                   object.
-                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
-                   it was when it was downloaded with Objaverse-XL.
-                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-                   GitHub organization and repo names.
-                 Return is not used. Defaults to None.
-             handle_missing_object (Optional[Callable], optional): Called when an object
-                 that is in Objaverse-XL is not found. Here, it is likely that the
-                 repository was deleted or renamed. If None, nothing will be done with
-                 the missing object. Args for the function include:
-                 - file_identifier (str): GitHub URL of the 3D object.
-                 - sha256 (str): SHA256 of the contents of the original 3D object.
-                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-                   GitHub organization and repo names.
-                 Return is not used. Defaults to None.
-             save_repo_format (Optional[Literal["zip", "tar", "tar.gz", "files"]],
-                 optional): Format to save the repository. If None, the repository will
-                 not be saved. If "files" is specified, each file will be saved
-                 individually. Otherwise, the repository can be saved as a "zip", "tar",
-                 or "tar.gz" file. Defaults to None.
-             handle_new_object (Optional[Callable], optional): Called when a new object
-                 is found. Here, the object is not used in Objaverse-XL, but is still
-                 downloaded with the repository. The object may have not been used
-                 because it does not successfully import into Blender. If None, the
-                 object will be downloaded, but nothing will be done with it. Args for
-                 the function include:
-                 - local_path (str): Local path to the downloaded 3D object.
-                 - file_identifier (str): GitHub URL of the 3D object.
-                 - sha256 (str): SHA256 of the contents of the 3D object.
-                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-                   GitHub organization and repo names.
-                 Return is not used. Defaults to None.
-
-         Raises:
-             ValueError: If download_dir is None and save_repo_format is not None.
-                 Otherwise, we don't know where to save the repo!
-
-         Returns:
-             Dict[str, str]: A dictionary that maps from the "fileIdentifier" to the path
-                 of the downloaded object.
-         """
-         save_repo_format = kwargs.get("save_repo_format", None)
-         handle_new_object = kwargs.get("handle_new_object", None)
-
-         if processes is None:
-             processes = multiprocessing.cpu_count()
-         if download_dir is None:
-             if save_repo_format is not None:
-                 raise ValueError(
-                     f"If {save_repo_format=} is not None, {download_dir=} must be specified."
-                 )
-             # path doesn't matter if we're not saving the repo
-             download_dir = "~/.objaverse"
-
-         base_download_dir = os.path.join(download_dir, "github")
-         fs, path = fsspec.core.url_to_fs(base_download_dir)
-         fs.makedirs(path, exist_ok=True)
-
-         # Getting immediate subdirectories of root_path
-         if save_repo_format == "files":
-             downloaded_repo_dirs = fs.glob(base_download_dir + "/repos/*/*/")
-             downloaded_repo_ids = {
-                 "/".join(x.split("/")[-2:]) for x in downloaded_repo_dirs
-             }
-         else:
-             downloaded_repo_dirs = fs.glob(
-                 base_download_dir + f"/repos/*/*.{save_repo_format}"
-             )
-             downloaded_repo_ids = set()
-             for x in downloaded_repo_dirs:
-                 org, repo = x.split("/")[-2:]
-                 repo = repo[: -len(f".{save_repo_format}")]
-                 repo_id = f"{org}/{repo}"
-                 downloaded_repo_ids.add(repo_id)
-
-         # make copy of objects
-         objects = objects.copy()
-
-         # get the unique repoIds
-         objects["repoIdHash"] = objects.apply(self._get_repo_id_with_hash, axis=1)
-         repo_id_hashes = set(objects["repoIdHash"].unique().tolist())
-         repo_ids = {
-             "/".join(repo_id_hash.split("/")[:2]) for repo_id_hash in repo_id_hashes
-         }
-         assert len(repo_id_hashes) == len(repo_ids), (
-             f"More than 1 commit hash per repoId!"
-             f" {len(repo_id_hashes)=}, {len(repo_ids)=}"
-         )
-
-         logger.info(
-             f"Provided {len(repo_ids)} repoIds with {len(objects)} objects to process."
-         )
-
-         # remove repoIds that have already been downloaded
-         repo_ids_to_download = repo_ids - downloaded_repo_ids
-         repo_id_hashes_to_download = [
-             repo_id_hash
-             for repo_id_hash in repo_id_hashes
-             if "/".join(repo_id_hash.split("/")[:2]) in repo_ids_to_download
-         ]
-
-         logger.info(
-             f"Found {len(repo_ids_to_download)} repoIds not yet downloaded. Downloading now..."
-         )
-
-         # get the objects to download
-         groups = list(objects.groupby("repoIdHash"))
-         with Pool(processes=processes) as pool:
-             out_list = list(
-                 tqdm(
-                     pool.imap_unordered(self._process_group, groups),
-                     total=len(groups),
-                     desc="Grouping objects by repository",
-                 )
-             )
-         objects_per_repo_id_hash = dict(out_list)
-
-         all_args = [
-             (
-                 repo_id_hash,
-                 fs,
-                 path,
-                 save_repo_format,
-                 objects_per_repo_id_hash[repo_id_hash],
-                 handle_found_object,
-                 handle_modified_object,
-                 handle_missing_object,
-                 handle_new_object,
-             )
-             for repo_id_hash in repo_id_hashes_to_download
-         ]
-
-         with Pool(processes=processes) as pool:
-             # use tqdm to show progress
-             out = list(
-                 tqdm(
-                     pool.imap_unordered(self._parallel_process_repo, all_args),
-                     total=len(all_args),
-                     desc="Downloading repositories",
-                 )
-             )
-
-         out_dict = {}
-         for x in out:
-             out_dict.update(x)
-
-         return out_dict
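
End to end, the class above is typically driven as follows (a usage sketch, not part of this commit; the callback body and the subset size are illustrative, and the import path is the pre-refactor one from this diff):

    from objaverse_xl.github import GitHubDownloader


    def on_found(local_path, file_identifier, sha256, metadata):
        # Fires once per object whose hash matches the annotation table.
        print(f"verified {file_identifier} -> {local_path}")


    downloader = GitHubDownloader()
    annotations = downloader.get_annotations()  # fileIdentifier, sha256, ...
    subset = annotations.head(10)  # small slice for illustration
    local_paths = downloader.download_objects(
        objects=subset,
        processes=4,
        handle_found_object=on_found,
        save_repo_format="zip",  # consumed via kwargs.get("save_repo_format")
    )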
 
objaverse_xl/objaverse_v1.py DELETED
@@ -1,537 +0,0 @@
1
- """Script to download objects from Objaverse 1.0."""
2
-
3
- import gzip
4
- import json
5
- import multiprocessing
6
- import os
7
- import tempfile
8
- import urllib.request
9
- from multiprocessing import Pool
10
- from typing import Any, Callable, Dict, List, Optional, Tuple
11
-
12
- import fsspec
13
- import pandas as pd
14
- import requests
15
- from loguru import logger
16
- from tqdm import tqdm
17
-
18
- from objaverse_xl.abstract import ObjaverseSource
19
- from objaverse_xl.utils import get_file_hash
20
-
21
-
22
- class SketchfabDownloader(ObjaverseSource):
23
- """A class for downloading and processing Objaverse 1.0."""
24
-
25
- def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
26
- """Load the annotations from the given directory.
27
-
28
- Args:
29
- download_dir (str, optional): The directory to load the annotations from.
30
- Supports all file systems supported by fsspec. Defaults to
31
- "~/.objaverse".
32
-
33
- Returns:
34
- pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
35
- "filename", and "license".
36
- """
37
- remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/objaverse_v1/object-metadata.parquet"
38
- download_path = os.path.join(
39
- download_dir, "hf-objaverse-v1", "thingiverse-objects.parquet"
40
- )
41
- fs, path = fsspec.core.url_to_fs(download_path)
42
-
43
- if not fs.exists(path):
44
- fs.makedirs(os.path.dirname(path), exist_ok=True)
45
- logger.info(f"Downloading {remote_url} to {download_path}")
46
- response = requests.get(remote_url)
47
- response.raise_for_status()
48
- with fs.open(path, "wb") as file:
49
- file.write(response.content)
50
-
51
- # read the file with pandas and fsspec
52
- with fs.open(download_path, "rb") as f:
53
- annotations_df = pd.read_parquet(f)
54
-
55
- annotations_df["metadata"] = "{}"
56
-
57
- return annotations_df
58
-
59
- def load_full_annotations(
60
- self,
61
- uids: Optional[List[str]] = None,
62
- download_dir: str = "~/.objaverse",
63
- ) -> Dict[str, Any]:
64
- """Load the full metadata of all objects in the dataset.
65
-
66
- Args:
67
- uids: A list of uids with which to load metadata. If None, it loads
68
- the metadata for all uids.
69
- download_dir: The base directory to download the annotations to. Supports all
70
- file systems supported by fsspec. Defaults to "~/.objaverse".
71
-
72
- Returns:
73
- A dictionary of the metadata for each object. The keys are the uids and the
74
- values are the metadata for that object.
75
- """
76
- # make the metadata dir if it doesn't exist
77
- metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
78
- fs, _ = fsspec.core.url_to_fs(metadata_path)
79
- fs.makedirs(metadata_path, exist_ok=True)
80
-
81
- # get the dir ids that need to be loaded if only downloading a subset of uids
82
- object_paths = self._load_object_paths(download_dir=download_dir)
83
- dir_ids = (
84
- {object_paths[uid].split("/")[1] for uid in uids}
85
- if uids is not None
86
- else {f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)}
87
- )
88
-
89
- # get the existing metadata files
90
- existing_metadata_files = fs.glob(
91
- os.path.join(metadata_path, "*.json.gz"), refresh=True
92
- )
93
- existing_dir_ids = {
94
- file.split("/")[-1].split(".")[0]
95
- for file in existing_metadata_files
96
- if file.endswith(".json.gz") # note partial files end with .json.gz.tmp
97
- }
98
- downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
99
- logger.info(
100
- f"Found {len(downloaded_dir_ids)} metadata files already downloaded"
101
- )
102
-
103
- # download the metadata from the missing dir_ids
104
- dir_ids_to_download = dir_ids - existing_dir_ids
105
- logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")
106
-
107
- # download the metadata file if it doesn't exist
108
- if len(dir_ids_to_download) > 0:
109
- for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
110
- # get the path to the json file
111
- path = os.path.join(metadata_path, f"{i_id}.json.gz")
112
-
113
- # get the url to the remote json file
114
- hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"
115
-
116
- # download the file to a tmp path to avoid partial downloads on interruption
117
- tmp_path = f"{path}.tmp"
118
- with fs.open(tmp_path, "wb") as f:
119
- with urllib.request.urlopen(hf_url) as response:
120
- f.write(response.read())
121
- fs.rename(tmp_path, path)
122
-
123
- out = {}
124
- for i_id in tqdm(dir_ids, desc="Reading metadata files"):
125
- # get the path to the json file
126
- path = os.path.join(metadata_path, f"{i_id}.json.gz")
127
-
128
- # read the json file of the metadata chunk
129
- with fs.open(path, "rb") as f:
130
- with gzip.GzipFile(fileobj=f) as gfile:
131
- content = gfile.read()
132
- data = json.loads(content)
133
-
134
- # filter the data to only include the uids we want
135
- if uids is not None:
136
- data = {uid: data[uid] for uid in uids if uid in data}
137
-
138
- # add the data to the out dict
139
- out.update(data)
140
-
141
- return out
142
-
143
- def _load_object_paths(self, download_dir: str) -> Dict[str, str]:
144
- """Load the object paths from the dataset.
145
-
146
- The object paths specify the location of where the object is located in the
147
- Hugging Face repo.
148
-
149
- Returns:
150
- A dictionary mapping the uid to the object path.
151
- """
152
- object_paths_file = "object-paths.json.gz"
153
- local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)
154
-
155
- # download the object_paths file if it doesn't exist
156
- fs, path = fsspec.core.url_to_fs(local_path)
157
- if not fs.exists(path):
158
- hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
159
- fs.makedirs(os.path.dirname(path), exist_ok=True)
160
-
161
- # download the file to a tmp path to avoid partial downloads on interruption
162
- tmp_path = f"{path}.tmp"
163
- with fs.open(tmp_path, "wb") as f:
164
- with urllib.request.urlopen(hf_url) as response:
165
- f.write(response.read())
166
- fs.rename(tmp_path, path)
167
-
168
- # read the object_paths
169
- with fs.open(path, "rb") as f:
170
- with gzip.GzipFile(fileobj=f) as gfile:
171
- content = gfile.read()
172
- object_paths = json.loads(content)
173
-
174
- return object_paths
175
-
176
- def load_uids(self, download_dir: str = "~/.objaverse") -> List[str]:
177
- """Load the uids from the dataset.
178
-
179
- Returns:
180
- A list of all the UIDs from the dataset.
181
- """
182
- return list(self._load_object_paths(download_dir=download_dir).keys())
183
-
184
- def _download_object(
185
- self,
186
- file_identifier: str,
187
- hf_object_path: str,
188
- download_dir: Optional[str],
189
- expected_sha256: str,
190
- handle_found_object: Optional[Callable] = None,
191
- handle_modified_object: Optional[Callable] = None,
192
- ) -> Tuple[str, Optional[str]]:
193
- """Download the object for the given uid.
194
-
195
- Args:
196
- file_identifier: The file identifier of the object.
197
- hf_object_path: The path to the object in the Hugging Face repo. Here,
198
- hf_object_path is the part that comes after "main" in the Hugging Face
199
- repo url:
200
- https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
201
- download_dir: The base directory to download the object to. Supports all
202
- file systems supported by fsspec. Defaults to "~/.objaverse".
203
- expected_sha256 (str): The expected SHA256 of the contents of the downloade
204
- object.
205
- handle_found_object (Optional[Callable]): Called when an object is
206
- successfully found and downloaded. Here, the object has the same sha256
207
- as the one that was downloaded with Objaverse-XL. If None, the object
208
- will be downloaded, but nothing will be done with it. Args for the
209
- function include:
210
- - local_path (str): Local path to the downloaded 3D object.
211
- - file_identifier (str): GitHub URL of the 3D object.
212
- - sha256 (str): SHA256 of the contents of the 3D object.
213
- - metadata (Dict[str, Any]): Metadata about the 3D object, including the
214
- GitHub organization and repo names.
215
- Return is not used.
216
- handle_modified_object (Optional[Callable]): Called when a modified object
217
- is found and downloaded. Here, the object is successfully downloaded,
218
- but it has a different sha256 than the one that was downloaded with
219
- Objaverse-XL. This is not expected to happen very often, because the
220
- same commit hash is used for each repo. If None, the object will be
221
- downloaded, but nothing will be done with it. Args for the function
222
- include:
223
- - local_path (str): Local path to the downloaded 3D object.
224
- - file_identifier (str): GitHub URL of the 3D object.
225
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
226
- object.
227
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
228
- it was when it was downloaded with Objaverse-XL.
229
- - metadata (Dict[str, Any]): Metadata about the 3D object, including the
230
- GitHub organization and repo names.
231
- Return is not used.
232
- handle_missing_object (Optional[Callable]): Called when an object that is in
233
- Objaverse-XL is not found. Here, it is likely that the repository was
234
- deleted or renamed. If None, nothing will be done with the missing
235
- object. Args for the function include:
236
- - file_identifier (str): GitHub URL of the 3D object.
237
- - sha256 (str): SHA256 of the contents of the original 3D object.
238
- - metadata (Dict[str, Any]): Metadata about the 3D object, including the
239
- GitHub organization and repo names.
240
- Return is not used.
241
-
242
-
243
- Returns:
244
- A tuple of the uid and the path to where the downloaded object. If
245
- download_dir is None, the path will be None.
246
- """
247
- hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"
248
-
249
- with tempfile.TemporaryDirectory() as temp_dir:
250
- # download the file locally
251
- temp_path = os.path.join(temp_dir, hf_object_path)
252
- os.makedirs(os.path.dirname(temp_path), exist_ok=True)
253
- temp_path_tmp = f"{temp_path}.tmp"
254
- with open(temp_path_tmp, "wb") as file:
255
- with urllib.request.urlopen(hf_url) as response:
256
- file.write(response.read())
257
- os.rename(temp_path_tmp, temp_path)
258
-
259
- # get the sha256 of the downloaded file
260
- sha256 = get_file_hash(temp_path)
261
-
262
- if sha256 == expected_sha256:
263
- if handle_found_object is not None:
264
- handle_found_object(
265
- local_path=temp_path,
266
- file_identifier=file_identifier,
267
- sha256=sha256,
268
- metadata={},
269
- )
270
- else:
271
- if handle_modified_object is not None:
272
- handle_modified_object(
273
- local_path=temp_path,
274
- file_identifier=file_identifier,
275
- new_sha256=sha256,
276
- old_sha256=expected_sha256,
277
- metadata={},
278
- )
279
-
280
- if download_dir is not None:
281
- filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
282
- fs, path = fsspec.core.url_to_fs(filename)
283
- fs.makedirs(os.path.dirname(path), exist_ok=True)
284
- fs.put(temp_path, path)
285
- else:
286
- path = None
287
-
288
- return file_identifier, path
289
-
290
- def _parallel_download_object(self, args):
291
- # workaround since starmap doesn't work well with tqdm
292
- return self._download_object(*args)
293
-
294
- def _get_uid(self, item: pd.Series) -> str:
295
- file_identifier = item["fileIdentifier"]
296
- return file_identifier.split("/")[-1]
297
-
298
- def uid_to_file_identifier(self, uid: str) -> str:
299
- """Convert the uid to the file identifier.
300
-
301
- Args:
302
- uid (str): The uid of the object.
303
-
304
- Returns:
305
- The file identifier of the object.
306
- """
307
- return f"https://sketchfab.com/3d-models/{uid}"
308
-
309
- def file_identifier_to_uid(self, file_identifier: str) -> str:
310
- """Convert the file identifier to the uid.
311
-
312
- Args:
313
- file_identifier (str): The file identifier of the object.
314
-
315
- Returns:
316
- The uid of the object.
317
- """
318
- return file_identifier.split("/")[-1]
319
-
320
- def download_objects(
321
- self,
322
- objects: pd.DataFrame,
323
- download_dir: Optional[str] = "~/.objaverse",
324
- processes: Optional[int] = None,
325
- handle_found_object: Optional[Callable] = None,
326
- handle_modified_object: Optional[Callable] = None,
327
- handle_missing_object: Optional[Callable] = None,
328
- **kwargs,
329
- ) -> Dict[str, str]:
330
- """Return the path to the object files for the given uids.
331
-
332
- If the object is not already downloaded, it will be downloaded.
333
-
334
- Args:
335
- objects (pd.DataFrame): Objects to download. Must have columns for
336
- the object "fileIdentifier" and "sha256". Use the `get_annotations`
337
- function to get the metadata.
338
- download_dir (Optional[str], optional): The base directory to download the
339
- object to. Supports all file systems supported by fsspec. If None, the
340
- objects will be removed after downloading. Defaults to "~/.objaverse".
341
- processes (Optional[int], optional): The number of processes to use to
342
- download the objects. If None, the number of processes will be set to
343
- the number of CPUs on the machine (multiprocessing.cpu_count()).
344
- Defaults to None.
345
- handle_found_object (Optional[Callable], optional): Called when an object is
346
- successfully found and downloaded. Here, the object has the same sha256
347
- as the one that was downloaded with Objaverse-XL. If None, the object
348
- will be downloaded, but nothing will be done with it. Args for the
349
- function include:
350
- - local_path (str): Local path to the downloaded 3D object.
351
- - file_identifier (str): File identifier of the 3D object.
352
- - sha256 (str): SHA256 of the contents of the 3D object.
353
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
354
- particular to the source.
355
- Return is not used. Defaults to None.
356
- handle_modified_object (Optional[Callable], optional): Called when a
357
- modified object is found and downloaded. Here, the object is
358
- successfully downloaded, but it has a different sha256 than the one that
359
- was downloaded with Objaverse-XL. This is not expected to happen very
360
- often, because the same commit hash is used for each repo. If None, the
361
- object will be downloaded, but nothing will be done with it. Args for
362
- the function include:
363
- - local_path (str): Local path to the downloaded 3D object.
364
- - file_identifier (str): File identifier of the 3D object.
365
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
366
- object.
367
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
368
- it was when it was downloaded with Objaverse-XL.
369
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
370
- particular to the source.
371
- Return is not used. Defaults to None.
372
- handle_missing_object (Optional[Callable], optional): Called when an object
373
- that is in Objaverse-XL is not found. Here, it is likely that the
374
- repository was deleted or renamed. If None, nothing will be done with
375
- the missing object.
376
- Args for the function include:
377
- - file_identifier (str): File identifier of the 3D object.
378
- - sha256 (str): SHA256 of the contents of the original 3D object.
379
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
380
- particular to the source.
381
- Return is not used. Defaults to None.
382
-
383
-
384
- Returns:
385
- A dictionary mapping the object fileIdentifier to the local path where
386
- the object was downloaded.
387
- """
388
- hf_object_paths = self._load_object_paths(
389
- download_dir=download_dir if download_dir is not None else "~/.objaverse"
390
- )
391
- if processes is None:
392
- processes = multiprocessing.cpu_count()
393
-
394
- # make a copy of the objects so we don't modify the original
395
- objects = objects.copy()
396
- objects["uid"] = objects.apply(self._get_uid, axis=1)
397
- uids_to_sha256 = dict(zip(objects["uid"], objects["sha256"]))
398
- uids_set = set(uids_to_sha256.keys())
399
-
400
- # create a new df where the uids are the index
401
- objects_uid_index = objects.set_index("uid")
402
-
403
- out = {}
404
- objects_to_download = []
405
- if download_dir is None:
406
- for _, item in objects.iterrows():
407
- uid = item["uid"]
408
- if uid not in hf_object_paths:
409
- logger.error(f"Could not find object with uid {uid}!")
410
- if handle_missing_object is not None:
411
- handle_missing_object(
412
- file_identifier=item["fileIdentifier"],
413
- sha256=item["sha256"],
414
- metadata={},
415
- )
416
- continue
417
- objects_to_download.append(
418
- (item["fileIdentifier"], hf_object_paths[uid], item["sha256"])
419
- )
420
- else:
421
- versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
422
- fs, path = fsspec.core.url_to_fs(versioned_dirname)
423
-
424
- # Get the existing file paths. This is much faster than calling fs.exists() for each
425
- # file. `glob()` is like walk, but returns a list of files instead of the nested
426
- # directory structure. glob() is also faster than find() / walk() since it doesn't
427
- # need to traverse the entire directory structure.
428
- existing_file_paths = fs.glob(
429
- os.path.join(path, "glbs", "*", "*.glb"), refresh=True
430
- )
431
- existing_uids = {
432
- file.split("/")[-1].split(".")[0]
433
- for file in existing_file_paths
434
- if file.endswith(".glb") # note partial files end with .glb.tmp
435
- }
436
-
437
- # add the existing downloaded uids to the return dict
438
- already_downloaded_uids = uids_set.intersection(existing_uids)
439
- for uid in already_downloaded_uids:
440
- hf_object_path = hf_object_paths[uid]
441
- fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
442
- out[self.uid_to_file_identifier(uid)] = fs_abs_object_path
443
-
444
- logger.info(
445
- f"Found {len(already_downloaded_uids)} objects already downloaded"
446
- )
447
-
448
- # get the uids that need to be downloaded
449
- remaining_uids = uids_set - existing_uids
450
- for uid in remaining_uids:
451
- item = objects_uid_index.loc[uid]
452
- if uid not in hf_object_paths:
453
- logger.error(f"Could not find object with uid {uid}. Skipping it.")
454
- if handle_missing_object is not None:
455
- handle_missing_object(
456
- file_identifier=item["fileIdentifier"],
457
- sha256=item["sha256"],
458
- metadata={},
459
- )
460
- continue
461
- objects_to_download.append(
462
- (item["fileIdentifier"], hf_object_paths[uid], item["sha256"])
463
- )
464
-
465
- logger.info(
466
- f"Downloading {len(objects_to_download)} new objects across {processes} processes"
467
- )
468
-
469
- # check if all objects are already downloaded
470
- if len(objects_to_download) == 0:
471
- return out
472
-
473
- args = [
474
- (
475
- file_identifier,
476
- hf_object_path,
477
- download_dir,
478
- sha256,
479
- handle_found_object,
480
- handle_modified_object,
481
- )
482
- for file_identifier, hf_object_path, sha256 in objects_to_download
483
- ]
484
-
485
- # download the objects in parallel
486
- with Pool(processes) as pool:
487
- new_object_downloads = list(
488
- tqdm(
489
- pool.imap_unordered(self._parallel_download_object, args),
490
- total=len(args),
491
- )
492
- )
493
-
494
- for file_identifier, local_path in new_object_downloads:
495
- out[file_identifier] = local_path
496
-
497
- return out
498
-
499
- def load_lvis_annotations(
500
- self,
501
- download_dir: str = "~/.objaverse",
502
- ) -> Dict[str, List[str]]:
503
- """Load the LVIS annotations.
504
-
505
- If the annotations are not already downloaded, they will be downloaded.
506
-
507
- Args:
508
- download_dir: The base directory to download the annotations to. Supports all
509
- file systems supported by fsspec. Defaults to "~/.objaverse".
510
-
511
- Returns:
512
- A dictionary mapping the LVIS category to the list of uids in that category.
513
- """
514
- hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"
515
-
516
- download_path = os.path.join(
517
- download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
518
- )
519
-
520
- # use fsspec
521
- fs, path = fsspec.core.url_to_fs(download_path)
522
- if not fs.exists(path):
523
- # make dir if it doesn't exist
524
- fs.makedirs(os.path.dirname(path), exist_ok=True)
525
-
526
- # download the file
527
- with fs.open(path, "wb") as f:
528
- with urllib.request.urlopen(hf_url) as response:
529
- f.write(response.read())
530
-
531
- # load the gzip file
532
- with fs.open(path, "rb") as f:
533
- with gzip.GzipFile(fileobj=f) as gfile:
534
- content = gfile.read()
535
- data = json.loads(content)
536
-
537
- return data
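
A minimal sketch of how the Sketchfab downloader deleted above is used, assuming the `SketchfabDownloader` class and `objaverse_xl.objaverse_v1` module path referenced later in this diff, that `get_annotations` matches the signature documented in the docstrings, and treating `"chair"` as an illustrative LVIS category:

```python
from objaverse_xl.objaverse_v1 import SketchfabDownloader

downloader = SketchfabDownloader()

# load_lvis_annotations maps each LVIS category to a list of Sketchfab uids.
lvis = downloader.load_lvis_annotations(download_dir="~/.objaverse")
print(len(lvis["chair"]))  # "chair" is an assumed example category

# get_annotations returns a DataFrame with "fileIdentifier" and "sha256"
# columns, which download_objects consumes.
annotations = downloader.get_annotations(download_dir="~/.objaverse")
paths = downloader.download_objects(
    objects=annotations.head(5),
    download_dir="~/.objaverse",
    processes=4,
)
print(paths)  # {fileIdentifier: local path of the downloaded object}
```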
 
objaverse_xl/objaverse_xl_downloader.py DELETED
@@ -1,136 +0,0 @@
1
- """Downloads 3D objects from all Objaverse-XL sources."""
2
-
3
- from typing import Callable, Dict, Optional
4
-
5
- import pandas as pd
6
-
7
- from objaverse_xl.abstract import ObjaverseSource
8
- from objaverse_xl.github import GitHubDownloader
9
- from objaverse_xl.objaverse_v1 import SketchfabDownloader
10
- from objaverse_xl.smithsonian import SmithsonianDownloader
11
- from objaverse_xl.thingiverse import ThingiverseDownloader
12
-
13
-
14
- class ObjaverseXLDownloader(ObjaverseSource):
15
- """Downloads 3D objects from all Objaverse-XL sources."""
16
-
17
- def __init__(self):
18
- super().__init__()
19
-
20
- self.downloaders = {
21
- "github": GitHubDownloader(),
22
- "thingiverse": ThingiverseDownloader(),
23
- "smithsonian": SmithsonianDownloader(),
24
- "sketchfab": SketchfabDownloader(),
25
- }
26
-
27
- def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
28
- """Loads the 3D object metadata as a Pandas DataFrame.
29
-
30
- Args:
31
- download_dir (str, optional): Directory to download the parquet metadata
32
- file. Supports all file systems supported by fsspec. Defaults to
33
- "~/.objaverse".
34
-
35
- Returns:
36
- pd.DataFrame: Metadata of the 3D objects as a Pandas DataFrame with columns
37
- for the object "fileIdentifier", "license", "source", "fileType",
38
- "sha256", and "metadata".
39
- """
40
- annotations = [
41
- downloader.get_annotations(download_dir)
42
- for downloader in self.downloaders.values()
43
- ]
44
- return pd.concat(annotations, ignore_index=True)
45
-
46
- def download_objects(
47
- self,
48
- objects: pd.DataFrame,
49
- download_dir: str = "~/.objaverse",
50
- processes: Optional[int] = None,
51
- handle_found_object: Optional[Callable] = None,
52
- handle_modified_object: Optional[Callable] = None,
53
- handle_missing_object: Optional[Callable] = None,
54
- **kwargs,
55
- ) -> Dict[str, str]:
56
- """Downloads all objects from the source.
57
-
58
- Args:
59
- objects (pd.DataFrame): Objects to download. Must have columns for
60
- the object "fileIdentifier" and "sha256". Use the `get_annotations`
61
- function to get the metadata.
62
- download_dir (str, optional): Directory to download the objects to.
63
- Supports all file systems supported by fsspec. Defaults to
64
- "~/.objaverse".
65
- processes (Optional[int], optional): Number of processes to use for
66
- downloading. If None, will use the number of CPUs on the machine.
67
- Defaults to None.
68
- save_repo_format (Optional[Literal["zip", "tar", "tar.gz", "files"]],
69
- optional): Format to save the repository; forwarded through **kwargs to the GitHub downloader. If None, the repository will
70
- not be saved. If "files" is specified, each file will be saved
71
- individually. Otherwise, the repository can be saved as a "zip", "tar",
72
- or "tar.gz" file. Defaults to None.
73
- handle_found_object (Optional[Callable], optional): Called when an object is
74
- successfully found and downloaded. Here, the object has the same sha256
75
- as the one that was downloaded with Objaverse-XL. If None, the object
76
- will be downloaded, but nothing will be done with it. Args for the
77
- function include:
78
- - local_path (str): Local path to the downloaded 3D object.
79
- - file_identifier (str): File identifier of the 3D object.
80
- - sha256 (str): SHA256 of the contents of the 3D object.
81
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
82
- particular to the source.
83
- Return is not used. Defaults to None.
84
- handle_modified_object (Optional[Callable], optional): Called when a
85
- modified object is found and downloaded. Here, the object is
86
- successfully downloaded, but it has a different sha256 than the one that
87
- was downloaded with Objaverse-XL. This is not expected to happen very
88
- often, because the same commit hash is used for each repo. If None, the
89
- object will be downloaded, but nothing will be done with it. Args for
90
- the function include:
91
- - local_path (str): Local path to the downloaded 3D object.
92
- - file_identifier (str): File identifier of the 3D object.
93
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
94
- object.
95
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
96
- it was when it was downloaded with Objaverse-XL.
97
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
98
- particular to the source.
99
- Return is not used. Defaults to None.
100
- handle_missing_object (Optional[Callable], optional): Called when an object
101
- that is in Objaverse-XL is not found. Here, it is likely that the
102
- repository was deleted or renamed. If None, nothing will be done with
103
- the missing object.
104
- Args for the function include:
105
- - file_identifier (str): File identifier of the 3D object.
106
- - sha256 (str): SHA256 of the contents of the original 3D object.
107
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
108
- particular to the source.
109
- Return is not used. Defaults to None.
110
-
111
- Returns:
112
- Dict[str, str]: Mapping of file identifiers to local paths of the downloaded
113
- 3D objects.
114
- """
115
- sources = set(objects["source"].unique().tolist())
116
- all_sources = {"github", "thingiverse", "smithsonian", "sketchfab"}
117
-
118
- if not sources.issubset(all_sources):
119
- raise ValueError(
120
- f"Invalid sources: {sources}. Must be a subset of {all_sources}."
121
- )
122
-
123
- downloaded_objects = {}
124
- for source in sources:
125
- source_downloads = self.downloaders[source].download_objects(
126
- objects[objects["source"] == source],
127
- download_dir,
128
- processes,
129
- handle_found_object,
130
- handle_modified_object,
131
- handle_missing_object,
132
- **kwargs,
133
- )
134
- downloaded_objects.update(source_downloads)
135
-
136
- return downloaded_objects
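
The handler callbacks documented above are invoked with keyword arguments, so their signatures must use the documented parameter names. A sketch of wiring one up through the combined downloader (module path taken from this diff):

```python
from typing import Any, Dict, Hashable

from objaverse_xl.objaverse_xl_downloader import ObjaverseXLDownloader


def handle_found_object(
    local_path: str,
    file_identifier: str,
    sha256: str,
    metadata: Dict[Hashable, Any],
) -> None:
    # Called once per object whose downloaded sha256 matches the annotation.
    print(f"{file_identifier} -> {local_path}")


downloader = ObjaverseXLDownloader()
annotations = downloader.get_annotations()

# download_objects dispatches on the "source" column, so mixed-source
# DataFrames are fine; here we take two objects per source.
sample = annotations.groupby("source").head(2)
paths = downloader.download_objects(
    objects=sample,
    handle_found_object=handle_found_object,
)
```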
 
objaverse_xl/smithsonian.py DELETED
@@ -1,316 +0,0 @@
1
- """Script to download 3D objects from the Smithsonian Institution."""
2
-
3
- import multiprocessing
4
- import os
5
- import tempfile
6
- from multiprocessing import Pool
7
- from typing import Callable, Dict, Optional, Tuple
8
-
9
- import fsspec
10
- import pandas as pd
11
- import requests
12
- from loguru import logger
13
- from tqdm import tqdm
14
-
15
- from objaverse_xl.abstract import ObjaverseSource
16
- from objaverse_xl.utils import get_file_hash, get_uid_from_str
17
-
18
-
19
- class SmithsonianDownloader(ObjaverseSource):
20
- """Script to download objects from the Smithsonian Institute."""
21
-
22
- def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
23
- """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.
24
-
25
- Args:
26
- download_dir (str, optional): Directory to download the parquet metadata file.
27
- Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
28
-
29
- Returns:
30
- pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
31
- columns for the object "title", "url", "quality", "file_type", "uid", and
32
- "license". The quality is always Medium and the file_type is always glb.
33
- """
34
- filename = os.path.join(download_dir, "smithsonian", "object-metadata.parquet")
35
- fs, path = fsspec.core.url_to_fs(filename)
36
- fs.makedirs(os.path.dirname(path), exist_ok=True)
37
-
38
- # download the parquet file if it doesn't exist
39
- if not fs.exists(path):
40
- url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
41
- response = requests.get(url)
42
- response.raise_for_status()
43
- with fs.open(path, "wb") as file:
44
- file.write(response.content)
45
-
46
- # load the parquet file with fsspec
47
- with fs.open(path) as f:
48
- df = pd.read_parquet(f)
49
-
50
- return df
51
-
52
- def _download_smithsonian_object(
53
- self,
54
- file_identifier: str,
55
- download_dir: Optional[str],
56
- expected_sha256: str,
57
- handle_found_object: Optional[Callable],
58
- handle_modified_object: Optional[Callable],
59
- handle_missing_object: Optional[Callable],
60
- ) -> Tuple[str, Optional[str]]:
61
- """Downloads a Smithsonian Object from a URL.
62
-
63
- Overwrites the file if it already exists and assumes this was previously checked.
64
-
65
- Args:
66
- file_identifier (str): URL to download the Smithsonian Object from.
67
- download_dir (Optional[str]): Directory to download the Smithsonian Object
68
- to. Supports all file systems supported by fsspec. If None, the
69
- Smithsonian Object will be deleted after it is downloaded and processed
70
- with the handler functions.
71
- expected_sha256 (str): The expected SHA256 of the contents of the downloaded
72
- object.
73
- handle_found_object (Optional[Callable]): Called when an object is
74
- successfully found and downloaded. Here, the object has the same sha256
75
- as the one that was downloaded with Objaverse-XL. If None, the object
76
- will be downloaded, but nothing will be done with it. Args for the
77
- function include:
78
- - local_path (str): Local path to the downloaded 3D object.
79
- - file_identifier (str): File identifier of the 3D object.
80
- - sha256 (str): SHA256 of the contents of the 3D object.
81
- - metadata (Dict[str, Any]): Metadata about the 3D object, which is
82
- particular to the source.
83
- Return is not used.
84
- handle_modified_object (Optional[Callable]): Called when a modified object
85
- is found and downloaded. Here, the object is successfully downloaded,
86
- but it has a different sha256 than the one that was downloaded with
87
- Objaverse-XL. This is not expected to happen very often, because the
88
- same commit hash is used for each repo. If None, the object will be
89
- downloaded, but nothing will be done with it. Args for the function
90
- include:
91
- - local_path (str): Local path to the downloaded 3D object.
92
- - file_identifier (str): File identifier of the 3D object.
93
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
94
- object.
95
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
96
- it was when it was downloaded with Objaverse-XL.
97
- - metadata (Dict[str, Any]): Metadata about the 3D object, which is
98
- particular to the source.
99
- Return is not used.
100
- handle_missing_object (Optional[Callable]): Called when an object that is in
101
- Objaverse-XL is not found. Here, it is likely that the repository was
102
- deleted or renamed. If None, nothing will be done with the missing
103
- object. Args for the function include:
104
- - file_identifier (str): File identifier of the 3D object.
105
- - sha256 (str): SHA256 of the contents of the original 3D object.
106
- - metadata (Dict[str, Any]): Metadata about the 3D object, which is
107
- particular to the source.
108
- Return is not used.
109
-
110
-
111
- Returns:
112
- Tuple[str, Optional[str]]: Tuple of the URL and the path to the downloaded
113
- Smithsonian Object. If the Smithsonian Object was not downloaded, the path
114
- will be None.
115
- """
116
- uid = get_uid_from_str(file_identifier)
117
-
118
- with tempfile.TemporaryDirectory() as temp_dir:
119
- temp_path = os.path.join(temp_dir, f"{uid}.glb")
120
- temp_path_tmp = f"{temp_path}.tmp"
121
-
122
- response = requests.get(file_identifier)
123
-
124
- # check if the path is valid
125
- if response.status_code == 404:
126
- logger.warning(f"404 for {file_identifier}")
127
- if handle_missing_object is not None:
128
- handle_missing_object(
129
- file_identifier=file_identifier,
130
- sha256=expected_sha256,
131
- metadata={},
132
- )
133
- return file_identifier, None
134
-
135
- with open(temp_path_tmp, "wb") as file:
136
- for chunk in response.iter_content(chunk_size=8192):
137
- file.write(chunk)
138
-
139
- # rename to temp_path
140
- os.rename(temp_path_tmp, temp_path)
141
-
142
- # check the sha256
143
- sha256 = get_file_hash(temp_path)
144
-
145
- if sha256 == expected_sha256:
146
- if handle_found_object is not None:
147
- handle_found_object(
148
- local_path=temp_path,
149
- file_identifier=file_identifier,
150
- sha256=sha256,
151
- metadata={},
152
- )
153
- else:
154
- if handle_modified_object is not None:
155
- handle_modified_object(
156
- local_path=temp_path,
157
- file_identifier=file_identifier,
158
- new_sha256=sha256,
159
- old_sha256=expected_sha256,
160
- metadata={},
161
- )
162
-
163
- if download_dir is not None:
164
- filename = os.path.join(
165
- download_dir, "smithsonian", "objects", f"{uid}.glb"
166
- )
167
- fs, path = fsspec.core.url_to_fs(filename)
168
- fs.makedirs(os.path.dirname(path), exist_ok=True)
169
- fs.put(temp_path, path)
170
- else:
171
- path = None
172
-
173
- return file_identifier, path
174
-
175
- def _parallel_download_object(self, args):
176
- # workaround since starmap doesn't work well with tqdm
177
- return self._download_smithsonian_object(*args)
178
-
179
- def download_objects(
180
- self,
181
- objects: pd.DataFrame,
182
- download_dir: Optional[str] = "~/.objaverse",
183
- processes: Optional[int] = None,
184
- handle_found_object: Optional[Callable] = None,
185
- handle_modified_object: Optional[Callable] = None,
186
- handle_missing_object: Optional[Callable] = None,
187
- **kwargs,
188
- ) -> Dict[str, str]:
189
- """Downloads all Smithsonian Objects.
190
-
191
- Args:
192
- objects (pd.DataFrame): Objects to download. Must have columns for
193
- the object "fileIdentifier" and "sha256". Use the `get_annotations`
194
- function to get the metadata.
195
- download_dir (Optional[str], optional): Directory to download the
196
- Smithsonian Objects to. Supports all file systems supported by fsspec.
197
- If None, the Smithsonian Objects will be deleted after they are
198
- downloaded and processed with the handler functions. Defaults to
199
- "~/.objaverse".
200
- processes (Optional[int], optional): Number of processes to use for
201
- downloading the Smithsonian Objects. If None, the number of processes
202
- will be set to the number of CPUs on the machine
203
- (multiprocessing.cpu_count()). Defaults to None.
204
- handle_found_object (Optional[Callable], optional): Called when an object is
205
- successfully found and downloaded. Here, the object has the same sha256
206
- as the one that was downloaded with Objaverse-XL. If None, the object
207
- will be downloaded, but nothing will be done with it. Args for the
208
- function include:
209
- - local_path (str): Local path to the downloaded 3D object.
210
- - file_identifier (str): File identifier of the 3D object.
211
- - sha256 (str): SHA256 of the contents of the 3D object.
212
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
213
- particular to the source.
214
- Return is not used. Defaults to None.
215
- handle_modified_object (Optional[Callable], optional): Called when a
216
- modified object is found and downloaded. Here, the object is
217
- successfully downloaded, but it has a different sha256 than the one that
218
- was downloaded with Objaverse-XL. This is not expected to happen very
219
- often, because the same commit hash is used for each repo. If None, the
220
- object will be downloaded, but nothing will be done with it. Args for
221
- the function include:
222
- - local_path (str): Local path to the downloaded 3D object.
223
- - file_identifier (str): File identifier of the 3D object.
224
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
225
- object.
226
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
227
- it was when it was downloaded with Objaverse-XL.
228
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
229
- particular to the source.
230
- Return is not used. Defaults to None.
231
- handle_missing_object (Optional[Callable], optional): Called when an object
232
- that is in Objaverse-XL is not found. Here, it is likely that the
233
- repository was deleted or renamed. If None, nothing will be done with
234
- the missing object.
235
- Args for the function include:
236
- - file_identifier (str): File identifier of the 3D object.
237
- - sha256 (str): SHA256 of the contents of the original 3D object.
238
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
239
- particular to the source.
240
- Return is not used. Defaults to None.
241
-
242
- Returns:
243
- Dict[str, str]: A dictionary mapping from the fileIdentifier to the
244
- download_path.
245
- """
246
- if processes is None:
247
- processes = multiprocessing.cpu_count()
248
-
249
- out = {}
250
- objects_to_download = []
251
- if download_dir is not None:
252
- objects_dir = os.path.join(download_dir, "smithsonian", "objects")
253
- fs, path = fsspec.core.url_to_fs(objects_dir)
254
- fs.makedirs(path, exist_ok=True)
255
-
256
- # get the existing glb files
257
- existing_glb_files = fs.glob(
258
- os.path.join(objects_dir, "*.glb"), refresh=True
259
- )
260
- existing_uids = set(
261
- os.path.basename(file).split(".")[0] for file in existing_glb_files
262
- )
263
-
264
- # find the urls that need to be downloaded
265
- already_downloaded_objects = set()
266
- for _, item in objects.iterrows():
267
- file_identifier = item["fileIdentifier"]
268
- uid = get_uid_from_str(file_identifier)
269
- if uid not in existing_uids:
270
- objects_to_download.append(item)
271
- else:
272
- already_downloaded_objects.add(file_identifier)
273
- out[file_identifier] = os.path.join(
274
- os.path.expanduser(objects_dir), f"{uid}.glb"
275
- )
276
- else:
277
- existing_uids = set()
278
- objects_to_download = [item for _, item in objects.iterrows()]
279
- already_downloaded_objects = set()
280
- out = {}
281
-
282
- logger.info(
283
- f"Found {len(already_downloaded_objects)} Smithsonian Objects already downloaded"
284
- )
285
- logger.info(
286
- f"Downloading {len(objects_to_download)} Smithsonian Objects with {processes} processes"
287
- )
288
-
289
- if len(objects_to_download) == 0:
290
- return out
291
-
292
- args = [
293
- [
294
- item["fileIdentifier"],
295
- download_dir,
296
- item["sha256"],
297
- handle_found_object,
298
- handle_modified_object,
299
- handle_missing_object,
300
- ]
301
- for item in objects_to_download
302
- ]
303
- with Pool(processes=processes) as pool:
304
- results = list(
305
- tqdm(
306
- pool.imap_unordered(self._parallel_download_object, args),
307
- total=len(objects_to_download),
308
- desc="Downloading Smithsonian Objects",
309
- )
310
- )
311
-
312
- for file_identifier, download_path in results:
313
- if download_path is not None:
314
- out[file_identifier] = download_path
315
-
316
- return out
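
Because `_download_smithsonian_object` verifies the sha256 of every download, the modified/missing handlers are the natural place to log integrity problems. A sketch, using the module path from this diff:

```python
from typing import Any, Dict, Hashable

from objaverse_xl.smithsonian import SmithsonianDownloader


def handle_missing_object(
    file_identifier: str, sha256: str, metadata: Dict[Hashable, Any]
) -> None:
    # 404s are reported here instead of raising.
    print(f"missing: {file_identifier}")


def handle_modified_object(
    local_path: str,
    file_identifier: str,
    new_sha256: str,
    old_sha256: str,
    metadata: Dict[Hashable, Any],
) -> None:
    # The upstream file changed since the Objaverse-XL snapshot was taken.
    print(f"modified: {file_identifier}")


downloader = SmithsonianDownloader()
annotations = downloader.get_annotations()
paths = downloader.download_objects(
    objects=annotations.head(10),
    handle_missing_object=handle_missing_object,
    handle_modified_object=handle_modified_object,
)
```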
 
objaverse_xl/thingiverse.py DELETED
@@ -1,380 +0,0 @@
1
- """Script to download objects from Thingiverse."""
2
-
3
- import multiprocessing
4
- import os
5
- import tempfile
6
- import time
7
- from multiprocessing import Pool
8
- from typing import Callable, Dict, Optional, Tuple
9
-
10
- import fsspec
11
- import pandas as pd
12
- import requests
13
- from loguru import logger
14
- from tqdm import tqdm
15
-
16
- from objaverse_xl.abstract import ObjaverseSource
17
- from objaverse_xl.utils import get_file_hash
18
-
19
-
20
- class ThingiverseDownloader(ObjaverseSource):
21
- """Script to download objects from Thingiverse."""
22
-
23
- def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
24
- """Load the annotations from the given directory.
25
-
26
- Args:
27
- download_dir (str, optional): The directory to load the annotations from.
28
- Supports all file systems supported by fsspec. Defaults to
29
- "~/.objaverse".
30
-
31
- Returns:
32
- pd.DataFrame: The annotations, which include the columns "thingId", "fileId",
33
- "filename", and "license".
34
- """
35
- remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
36
- download_path = os.path.join(
37
- download_dir, "thingiverse", "thingiverse-objects.parquet"
38
- )
39
- fs, path = fsspec.core.url_to_fs(download_path)
40
-
41
- if not fs.exists(path):
42
- fs.makedirs(os.path.dirname(path), exist_ok=True)
43
- logger.info(f"Downloading {remote_url} to {download_path}")
44
- response = requests.get(remote_url)
45
- response.raise_for_status()
46
- with fs.open(path, "wb") as file:
47
- file.write(response.content)
48
-
49
- # read the file with pandas and fsspec
50
- with fs.open(download_path, "rb") as f:
51
- annotations_df = pd.read_parquet(f)
52
-
53
- return annotations_df
54
-
55
- def _get_response_with_retries(
56
- self, url: str, max_retries: int = 3, retry_delay: int = 5
57
- ) -> Optional[requests.models.Response]:
58
- """Get a response from a URL with retries.
59
-
60
- Args:
61
- url (str): The URL to get a response from.
62
- max_retries (int, optional): The maximum number of retries. Defaults to 3.
63
- retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.
64
-
65
- Returns:
66
- Optional[requests.models.Response]: The response from the URL. If there was an error, returns None.
67
- """
68
-
69
- for i in range(max_retries):
70
- try:
71
- response = requests.get(url, stream=True)
72
- # retry on unexpected status codes; accept 200 (success) and 404 (definitive miss)
73
- if response.status_code not in {200, 404}:
74
- time.sleep(retry_delay)
75
- continue
76
- break
77
- except ConnectionError:
78
- if i < max_retries - 1: # i.e. not on the last try
79
- time.sleep(retry_delay)
80
- else:
81
- return None
82
-
83
- return response
84
-
85
- def _download_item(
86
- self,
87
- thingi_file_id: str,
88
- thingi_thing_id: str,
89
- file_identifier: str,
90
- download_dir: Optional[str],
91
- expected_sha256: str,
92
- handle_found_object: Optional[Callable],
93
- handle_modified_object: Optional[Callable],
94
- handle_missing_object: Optional[Callable],
95
- ) -> Tuple[str, Optional[str]]:
96
- """Download the given item.
97
-
98
- Args:
99
- thingi_file_id (str): The Thingiverse file ID of the object.
100
- thingi_thing_id (str): The Thingiverse thing ID of the object.
101
- file_identifier (str): File identifier of the Thingiverse object.
102
- download_dir (Optional[str]): Directory to download the Thingiverse object
103
- to. Supports all file systems supported by fsspec. If None, the
104
- Thingiverse object will be deleted after it is downloaded and processed
105
- with the handler functions.
106
- expected_sha256 (str): The expected SHA256 of the contents of the downloaded
107
- object.
108
- handle_found_object (Optional[Callable]): Called when an object is
109
- successfully found and downloaded. Here, the object has the same sha256
110
- as the one that was downloaded with Objaverse-XL. If None, the object
111
- will be downloaded, but nothing will be done with it. Args for the
112
- function include:
113
- - local_path (str): Local path to the downloaded 3D object.
114
- - file_identifier (str): File identifier of the 3D object.
115
- - sha256 (str): SHA256 of the contents of the 3D object.
116
- - metadata (Dict[str, Any]): Metadata about the 3D object, including the
117
- GitHub organization and repo names.
118
- Return is not used.
119
- handle_modified_object (Optional[Callable]): Called when a modified object
120
- is found and downloaded. Here, the object is successfully downloaded,
121
- but it has a different sha256 than the one that was downloaded with
122
- Objaverse-XL. This is not expected to happen very often, because the
123
- same commit hash is used for each repo. If None, the object will be
124
- downloaded, but nothing will be done with it. Args for the function
125
- include:
126
- - local_path (str): Local path to the downloaded 3D object.
127
- - file_identifier (str): File identifier of the 3D object.
128
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
129
- object.
130
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
131
- it was when it was downloaded with Objaverse-XL.
132
- - metadata (Dict[str, Any]): Metadata about the 3D object, including the
133
- GitHub organization and repo names.
134
- Return is not used.
135
- handle_missing_object (Optional[Callable]): Called when an object that is in
136
- Objaverse-XL is not found. Here, it is likely that the repository was
137
- deleted or renamed. If None, nothing will be done with the missing
138
- object. Args for the function include:
139
- - file_identifier (str): File identifier of the 3D object.
140
- - sha256 (str): SHA256 of the contents of the original 3D object.
141
- - metadata (Dict[str, Any]): Metadata about the 3D object, including the
142
- GitHub organization and repo names.
143
- Return is not used.
144
-
145
-
146
- Returns:
147
- Tuple[str, Optional[str]]: The file identifier and the path to the
148
- downloaded file. If there was an error or 404, the path is None.
149
- """
150
- url = f"https://www.thingiverse.com/download:{thingi_file_id}"
151
- response = self._get_response_with_retries(url)
152
- filename = f"thing-{thingi_thing_id}-file-{thingi_file_id}.stl"
153
-
154
- if response is None:
155
- logger.warning(
156
- f"Thingiverse file ID {thingi_file_id} could not get response from {url}"
157
- )
158
- # NOTE: the object is probably not missing, but the request failed
159
- return file_identifier, None
160
-
161
- # Check if the request was successful
162
- if response.status_code == 404:
163
- logger.warning(
164
- f"Thingiverse file ID {thingi_file_id} (404) could not find file"
165
- )
166
- if handle_missing_object is not None:
167
- handle_missing_object(
168
- file_identifier=file_identifier, sha256=expected_sha256, metadata={}
169
- )
170
- return file_identifier, None
171
-
172
- with tempfile.TemporaryDirectory() as temp_dir:
173
- temp_path = os.path.join(temp_dir, filename)
174
- temp_path_tmp = temp_path + ".tmp"
175
-
176
- with open(temp_path_tmp, "wb") as file:
177
- for chunk in response.iter_content(chunk_size=8192):
178
- file.write(chunk)
179
-
180
- # rename to temp_path
181
- os.rename(temp_path_tmp, temp_path)
182
-
183
- # check the sha256
184
- sha256 = get_file_hash(temp_path)
185
-
186
- if sha256 == expected_sha256:
187
- if handle_found_object is not None:
188
- handle_found_object(
189
- local_path=temp_path,
190
- file_identifier=file_identifier,
191
- sha256=sha256,
192
- metadata={},
193
- )
194
- else:
195
- if handle_modified_object is not None:
196
- handle_modified_object(
197
- local_path=temp_path,
198
- file_identifier=file_identifier,
199
- new_sha256=sha256,
200
- old_sha256=expected_sha256,
201
- metadata={},
202
- )
203
-
204
- if download_dir is not None:
205
- filename = os.path.join(download_dir, filename)
206
- fs, path = fsspec.core.url_to_fs(filename)
207
- fs.makedirs(os.path.dirname(path), exist_ok=True)
208
- fs.put(temp_path, path)
209
- else:
210
- path = None
211
-
212
- return file_identifier, path
213
-
214
- def _parallel_download_item(self, args):
215
- return self._download_item(*args)
216
-
217
- def get_file_id_from_file_identifier(self, file_identifier: str) -> str:
218
- """Get the thingiverse file ID from the Objaverse-XL file identifier.
219
-
220
- Args:
221
- file_identifier (str): The Objaverse-XL file identifier.
222
-
223
- Returns:
224
- str: The Thingiverse file ID.
225
- """
226
- return file_identifier.split("fileId=")[-1]
227
-
228
- def get_thing_id_from_file_identifier(self, file_identifier: str) -> str:
229
- """Get the thingiverse thing ID from the Objaverse-XL file identifier.
230
-
231
- Args:
232
- file_identifier (str): The Objaverse-XL file identifier.
233
-
234
- Returns:
235
- str: The Thingiverse thing ID.
236
- """
237
- return file_identifier.split("/")[-2].split(":")[1]
238
-
239
- def download_objects(
240
- self,
241
- objects: pd.DataFrame,
242
- download_dir: Optional[str] = "~/.objaverse",
243
- processes: Optional[int] = None,
244
- handle_found_object: Optional[Callable] = None,
245
- handle_modified_object: Optional[Callable] = None,
246
- handle_missing_object: Optional[Callable] = None,
247
- **kwargs,
248
- ) -> Dict[str, str]:
249
- """Download the objects from the given list of things and files.
250
-
251
- Args:
252
- objects (pd.DataFrame): Thingiverse objects to download. Must have columns
253
- for the object "fileIdentifier" and "sha256". Use the `get_annotations`
254
- function to get the metadata.
255
- download_dir (str, optional): The directory to save the files to. Supports
256
- all file systems supported by fsspec. Defaults to "~/.objaverse-xl".
257
- processes (int, optional): The number of processes to use. If None, maps to
258
- use all available CPUs using multiprocessing.cpu_count(). Defaults to
259
- None.
260
- handle_found_object (Optional[Callable], optional): Called when an object is
261
- successfully found and downloaded. Here, the object has the same sha256
262
- as the one that was downloaded with Objaverse-XL. If None, the object
263
- will be downloaded, but nothing will be done with it. Args for the
264
- function include:
265
- - local_path (str): Local path to the downloaded 3D object.
266
- - file_identifier (str): File identifier of the 3D object.
267
- - sha256 (str): SHA256 of the contents of the 3D object.
268
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object,
269
- including the GitHub organization and repo names.
270
- Return is not used. Defaults to None.
271
- handle_modified_object (Optional[Callable], optional): Called when a
272
- modified object is found and downloaded. Here, the object is
273
- successfully downloaded, but it has a different sha256 than the one that
274
- was downloaded with Objaverse-XL. This is not expected to happen very
275
- often, because the same commit hash is used for each repo. If None, the
276
- object will be downloaded, but nothing will be done with it. Args for
277
- the function include:
278
- - local_path (str): Local path to the downloaded 3D object.
279
- - file_identifier (str): File identifier of the 3D object.
280
- - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
281
- object.
282
- - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
283
- it was when it was downloaded with Objaverse-XL.
284
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
285
- particular to the source.
286
- Return is not used. Defaults to None.
287
- handle_missing_object (Optional[Callable], optional): Called when an object
288
- that is in Objaverse-XL is not found. Here, it is likely that the
289
- repository was deleted or renamed. If None, nothing will be done with
290
- the missing object.
291
- Args for the function include:
292
- - file_identifier (str): File identifier of the 3D object.
293
- - sha256 (str): SHA256 of the contents of the original 3D object.
294
- - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
295
- particular to the source.
296
- Return is not used. Defaults to None.
297
-
298
- Returns:
299
- Dict[str, str]: A dictionary mapping from the fileIdentifier to the path of
300
- the downloaded file.
301
- """
302
- if processes is None:
303
- processes = multiprocessing.cpu_count()
304
-
305
- objects = objects.copy()
306
- objects["thingiFileId"] = objects["fileIdentifier"].apply(
307
- self.get_file_id_from_file_identifier
308
- )
309
- objects["thingiThingId"] = objects["fileIdentifier"].apply(
310
- self.get_thing_id_from_file_identifier
311
- )
312
-
313
- # create the download directory
314
- out = {}
315
- if download_dir is not None:
316
- download_dir = os.path.join(download_dir, "thingiverse")
317
- fs, path = fsspec.core.url_to_fs(download_dir)
318
- fs.makedirs(path, exist_ok=True)
319
-
320
- # check to filter out files that already exist
321
- existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
322
- existing_file_ids = {
323
- os.path.basename(file).split(".")[0].split("-")[-1]
324
- for file in existing_files
325
- }
326
-
327
- # filter out existing files
328
- items_to_download = []
329
- already_downloaded_count = 0
330
- for _, item in objects.iterrows():
331
- if item["thingiFileId"] in existing_file_ids:
332
- already_downloaded_count += 1
333
- out[item["fileIdentifier"]] = os.path.join(
334
- os.path.expanduser(download_dir),
335
- f"thing-{item['thingiThingId']}-file-{item['thingiFileId']}.stl",
336
- )
337
- else:
338
- items_to_download.append(item)
339
-
340
- logger.info(
341
- f"Found {already_downloaded_count} Thingiverse objects downloaded"
342
- )
343
- else:
344
- items_to_download = [item for _, item in objects.iterrows()]
345
-
346
- logger.info(
347
- f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
348
- )
349
- if len(items_to_download) == 0:
350
- return out
351
-
352
- # download the files
353
- args = [
354
- (
355
- item["thingiFileId"],
356
- item["thingiThingId"],
357
- item["fileIdentifier"],
358
- download_dir,
359
- item["sha256"],
360
- handle_found_object,
361
- handle_modified_object,
362
- handle_missing_object,
363
- )
364
- for item in items_to_download
365
- ]
366
-
367
- with Pool(processes=processes) as pool:
368
- results = list(
369
- tqdm(
370
- pool.imap_unordered(self._parallel_download_item, args),
371
- total=len(args),
372
- desc="Downloading Thingiverse Objects",
373
- )
374
- )
375
-
376
- for file_identifier, download_path in results:
377
- if download_path is not None:
378
- out[file_identifier] = download_path
379
-
380
- return out
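
The two helper methods above define the Thingiverse `fileIdentifier` format implicitly: a `thing:<id>` path segment followed by a `fileId=<id>` segment. A sketch with an illustrative (not real) identifier shaped to match that parsing logic:

```python
from objaverse_xl.thingiverse import ThingiverseDownloader

downloader = ThingiverseDownloader()

# Illustrative identifier; the ids are placeholders, not real objects.
file_identifier = "https://www.thingiverse.com/thing:11589/fileId=40573"

thing_id = downloader.get_thing_id_from_file_identifier(file_identifier)  # "11589"
file_id = downloader.get_file_id_from_file_identifier(file_identifier)    # "40573"

# Downloads are saved under this naming scheme, per _download_item above:
print(f"thing-{thing_id}-file-{file_id}.stl")
```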
 
objaverse_xl/utils.py DELETED
@@ -1,45 +0,0 @@
1
- """Utility functions for the objaverse_xl package."""
2
-
3
- import hashlib
4
- import os
5
- import uuid
6
-
7
-
8
- def get_uid_from_str(string: str) -> str:
9
- """Generates a UUID from a string.
10
-
11
- Args:
12
- string (str): String to generate a UUID from.
13
-
14
- Returns:
15
- str: UUID generated from the string.
16
- """
17
- namespace = uuid.NAMESPACE_DNS
18
- return str(uuid.uuid5(namespace, string))
19
-
20
-
21
- def get_file_hash(file_path: str) -> str:
22
- """Get the sha256 hash of a file.
23
-
24
- Args:
25
- file_path (str): Path to the file.
26
-
27
- Returns:
28
- str: sha256 hash of the file.
29
- """
30
- # Check if the path is a symbolic link
31
- if os.path.islink(file_path):
32
- # Resolve the symbolic link
33
- resolved_path = os.readlink(file_path)
34
- # Check if the resolved path exists
35
- if not os.path.exists(resolved_path):
36
- raise FileNotFoundError(
37
- f"The symbolic link points to a file that doesn't exist: {resolved_path}"
38
- )
39
- sha256 = hashlib.sha256()
40
- # Read the file from the path
41
- with open(file_path, "rb") as f:
42
- # Loop till the end of the file
43
- for byte_block in iter(lambda: f.read(4096), b""):
44
- sha256.update(byte_block)
45
- return sha256.hexdigest()
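
A short sketch of the two utilities above. `get_uid_from_str` is deterministic (uuid5 over a fixed namespace), which is what lets file identifiers map to stable local filenames; the paths below are placeholders:

```python
from objaverse_xl.utils import get_file_hash, get_uid_from_str

# The same string always yields the same uid.
uid = get_uid_from_str("https://example.com/some-object.glb")
assert uid == get_uid_from_str("https://example.com/some-object.glb")

# sha256 of a local file, streamed in 4096-byte blocks.
digest = get_file_hash("/path/to/object.glb")  # placeholder path
print(uid, digest)
```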
 
requirements-test.txt DELETED
@@ -1,8 +0,0 @@
1
- black==22.3.0
2
- mypy==0.942
3
- pylint==2.13.4
4
- pytest-cov==3.0.0
5
- pytest-xdist==2.5.0
6
- pytest==7.0.1
7
- isort==5.12.0
8
- types-requests==2.31.0.2
 
requirements.txt DELETED
@@ -1,7 +0,0 @@
1
- requests
2
- pandas
3
- pyarrow
4
- tqdm
5
- loguru
6
- fsspec>=2022.11.0
7
- gputil==1.4.0
 
scripts/rendering/README.md DELETED
@@ -1,149 +0,0 @@
1
- # 🪐 Objaverse-XL Rendering Script
2
-
3
- ![Blender generated with MidJourney](https://github.com/allenai/objaverse-xl/assets/28768645/69064f78-a752-40d6-bd36-ea7c15ffa1ec)
4
-
5
- Scripts for rendering Objaverse-XL with [Blender](https://www.blender.org/). Rendering is the process of taking pictures of the 3D objects. These images can then be used for training AI models.
6
-
7
- ## 🖥️ Setup
8
-
9
- 1. Clone the repository and enter the rendering directory:
10
-
11
- ```bash
12
- git clone https://github.com/allenai/objaverse-xl.git && \
13
- cd objaverse-xl/scripts/rendering
14
- ```
15
-
16
- 2. Download Blender:
17
-
18
- ```bash
19
- wget https://download.blender.org/release/Blender3.2/blender-3.2.2-linux-x64.tar.xz && \
20
- tar -xf blender-3.2.2-linux-x64.tar.xz && \
21
- rm blender-3.2.2-linux-x64.tar.xz
22
- ```
23
-
24
- 3. If you're on a headless Linux server, install Xorg and start it:
25
-
26
- ```bash
27
- sudo apt-get install xserver-xorg -y && \
28
- sudo python3 start_x_server.py start
29
- ```
30
-
31
- 4. Install the Python dependencies. Note that Python >3.8 is required:
32
-
33
- ```bash
34
- cd ../.. && \
35
- pip install -r requirements.txt && \
36
- pip install -e . && \
37
- cd scripts/rendering
38
- ```
39
-
40
- ## 📸 Usage
41
-
42
- ### 🐥 Minimal Example
43
-
44
- After setup, we can start to render objects using the `main.py` script:
45
-
46
- ```bash
47
- python3 main.py
48
- ```
49
-
50
- After running this, you should see 10 zip files located in `~/.objaverse/github/renders`. Each zip file corresponds to the rendering of a unique object, in this case from [our example 3D objects repo](https://github.com/mattdeitke/objaverse-xl-test-files):
51
-
52
- ```bash
53
- > ls ~/.objaverse/github/renders
54
- 0fde27a0-99f0-5029-8e20-be9b8ecabb59.zip 54f7478b-4983-5541-8cf7-1ab2e39a842e.zip 93499b75-3ee0-5069-8f4b-1bab60d2e6d6.zip
55
- 21dd4d7b-b203-5d00-b325-0c041f43524e.zip 5babbc61-d4e1-5b5c-9b47-44994bbf958e.zip ab30e24f-1046-5257-8806-2e346f4efebe.zip
56
- 415ca2d5-9d87-568c-a5ff-73048a084229.zip 5f6d2547-3661-54d5-9895-bebc342c753d.zip
57
- 44414a2a-e8f0-5a5f-bb58-6be50d8fd034.zip 8a170083-0529-547f-90ec-ebc32eafe594.zip
58
- ```
59
-
60
- If we unzip one of the zip files:
61
-
62
- ```bash
63
- > cd ~/.objaverse/github/renders
64
- > unzip 0fde27a0-99f0-5029-8e20-be9b8ecabb59.zip
65
- ```
66
-
67
- we will see that there is a new `0fde27a0-99f0-5029-8e20-be9b8ecabb59` directory. If we look in that directory, we'll find the following files:
68
-
69
- ```bash
70
- > ls 0fde27a0-99f0-5029-8e20-be9b8ecabb59
71
- 000.npy 001.npy 002.npy 003.npy 004.npy 005.npy 006.npy 007.npy 008.npy 009.npy 010.npy 011.npy metadata.json
72
- 000.png 001.png 002.png 003.png 004.png 005.png 006.png 007.png 008.png 009.png 010.png 011.png
73
- ```
74
-
75
- Here, we see that there are 12 renders `[000-011].png`. Each render will look something like one of the 4 images shown below, but likely with the camera at a different location as its location is randomized during rendering:
76
-
77
- ![temp](https://github.com/allenai/objaverse-xl/assets/28768645/69d79e26-4df1-4bd2-854c-7d3c888adae7)
78
-
79
- Additionally, there are 12 npy files `[000-011].npy`, which include information about the camera's pose for a given render. We can read the npy files using:
80
-
81
- ```python
82
- import numpy as np
83
- array = np.load("000.npy")
84
- ```
85
-
86
- where array is now a 3x4 [camera matrix](https://en.wikipedia.org/wiki/Camera_matrix) that looks something like:
87
-
88
- ```python
89
- array([[6.07966840e-01, 7.93962419e-01, 3.18103019e-08, 2.10451518e-07],
90
- [4.75670159e-01, -3.64238620e-01, 8.00667346e-01, -5.96046448e-08],
91
- [6.35699809e-01, -4.86779213e-01, -5.99109232e-01, -1.66008198e+00]])
92
- ```
93
-
94
- Finally, we also have a `metadata.json` file, which contains metadata about the object and scene:
95
-
96
- ```json
97
- {
98
- "animation_count": 0,
99
- "armature_count": 0,
100
- "edge_count": 2492,
101
- "file_identifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.abc",
102
- "file_size": 108916,
103
- "lamp_count": 1,
104
- "linked_files": [],
105
- "material_count": 0,
106
- "mesh_count": 3,
107
- "missing_textures": {
108
- "count": 0,
109
- "file_path_to_color": {},
110
- "files": []
111
- },
112
- "object_count": 8,
113
- "poly_count": 984,
114
- "random_color": null,
115
- "save_uid": "0fde27a0-99f0-5029-8e20-be9b8ecabb59",
116
- "scene_size": {
117
- "bbox_max": [
118
- 4.999998569488525,
119
- 6.0,
120
- 1.0
121
- ],
122
- "bbox_min": [
123
- -4.999995231628418,
124
- -6.0,
125
- -1.0
126
- ]
127
- },
128
- "sha256": "879bc9d2d85e4f3866f0cfef41f5236f9fff5f973380461af9f69cdbed53a0da",
129
- "shape_key_count": 0,
130
- "vert_count": 2032
131
- }
132
- ```
133
-
134
- ### 🎛 Configuration
135
-
136
- ### 🧑‍🔬️ Experimental Features
137
-
138
- USDZ support is experimental. Since Blender does not natively support usdz, we use [this Blender addon](https://github.com/robmcrosby/BlenderUSDZ), but it doesn't work with all types of USDZs. If you have a better solution, PRs are very much welcome 😄!
139
-
140
- ## 👋 Our Team
141
-
142
- Objaverse-XL is an open-source project managed by the [PRIOR team](//prior.allenai.org) at the [Allen Institute for AI](//allenai.org) (AI2).
143
- AI2 is a non-profit institute with the mission to contribute to humanity through high-impact AI research and engineering.
144
-
145
- <br />
146
-
147
- <a href="//prior.allenai.org">
148
- <p align="center"><img width="100%" src="https://raw.githubusercontent.com/allenai/ai2thor/main/doc/static/ai2-prior.svg" /></p>
149
- </a>
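
As a companion to the camera matrices described in the rendering README above: a 3x4 matrix of that shape is conventionally the extrinsics `[R | t]`. A hedged sketch of splitting it, assuming the rendering script saves this standard layout (the script itself defines the exact convention):

```python
import numpy as np

camera_matrix = np.load("000.npy")  # shape (3, 4), as shown in the README

# Assumption: the left 3x3 block is the rotation and the last column the
# translation, i.e. the standard [R | t] extrinsics layout.
R = camera_matrix[:, :3]
t = camera_matrix[:, 3]

# Sanity check: a rotation matrix satisfies R @ R.T ~= I.
print(np.allclose(R @ R.T, np.eye(3), atol=1e-5))
```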
 
scripts/rendering/blender_script.py DELETED
@@ -1,899 +0,0 @@
1
- """Blender script to render images of 3D models."""
2
-
3
- import argparse
4
- import json
5
- import math
6
- import os
7
- import random
8
- import sys
9
- from typing import Any, Callable, Dict, Generator, List, Literal, Optional, Set, Tuple
10
-
11
- import bpy
12
- import numpy as np
13
- from mathutils import Matrix, Vector
14
-
15
- IMPORT_FUNCTIONS: Dict[str, Callable] = {
16
- "obj": bpy.ops.import_scene.obj,
17
- "glb": bpy.ops.import_scene.gltf,
18
- "gltf": bpy.ops.import_scene.gltf,
19
- "usd": bpy.ops.import_scene.usd,
20
- "fbx": bpy.ops.import_scene.fbx,
21
- "stl": bpy.ops.import_mesh.stl,
22
- "usda": bpy.ops.import_scene.usda,
23
- "dae": bpy.ops.wm.collada_import,
24
- "ply": bpy.ops.import_mesh.ply,
25
- "abc": bpy.ops.wm.alembic_import,
26
- "blend": bpy.ops.wm.append,
27
- }
28
-
29
-
30
- def reset_cameras() -> None:
31
- """Resets the cameras in the scene to a single default camera."""
32
- # Delete all existing cameras
33
- bpy.ops.object.select_all(action="DESELECT")
34
- bpy.ops.object.select_by_type(type="CAMERA")
35
- bpy.ops.object.delete()
36
-
37
- # Create a new camera with default properties
38
- bpy.ops.object.camera_add()
39
-
40
- # Rename the new camera to 'NewDefaultCamera'
41
- new_camera = bpy.context.active_object
42
- new_camera.name = "Camera"
43
-
44
- # Set the new camera as the active camera for the scene
45
- bpy.context.scene.camera = new_camera
46
-
47
-
48
- def sample_point_on_sphere(radius: float) -> Tuple[float, float, float]:
49
- """Samples a point on a sphere with the given radius.
50
-
51
- Args:
52
- radius (float): Radius of the sphere.
53
-
54
- Returns:
55
- Tuple[float, float, float]: A point on the sphere.
56
- """
57
- theta = random.random() * 2 * math.pi
58
- phi = math.acos(2 * random.random() - 1)
59
- return (
60
- radius * math.sin(phi) * math.cos(theta),
61
- radius * math.sin(phi) * math.sin(theta),
62
- radius * math.cos(phi),
63
- )
64
-
65
-
66
- def _sample_spherical(
67
- radius_min: float = 1.5,
68
- radius_max: float = 2.0,
69
- maxz: float = 1.6,
70
- minz: float = -0.75,
71
- ) -> np.ndarray:
72
- """Sample a random point in a spherical shell.
73
-
74
- Args:
75
- radius_min (float): Minimum radius of the spherical shell.
76
- radius_max (float): Maximum radius of the spherical shell.
77
- maxz (float): Maximum z value of the spherical shell.
78
- minz (float): Minimum z value of the spherical shell.
79
-
80
- Returns:
81
- np.ndarray: A random (x, y, z) point in the spherical shell.
82
- """
83
- correct = False
84
- vec = np.array([0, 0, 0])
85
- while not correct:
86
- vec = np.random.uniform(-1, 1, 3)
87
- # vec[2] = np.abs(vec[2])
88
- radius = np.random.uniform(radius_min, radius_max, 1)
89
- vec = vec / np.linalg.norm(vec, axis=0) * radius[0]
90
- if maxz > vec[2] > minz:
91
- correct = True
92
- return vec
93
-
94
-
95
- def randomize_camera(
96
- radius_min: float = 1.5,
97
- radius_max: float = 2.2,
98
- maxz: float = 2.2,
99
- minz: float = -2.2,
100
- only_northern_hemisphere: bool = False,
101
- ) -> bpy.types.Object:
102
- """Randomizes the camera location and rotation inside of a spherical shell.
103
-
104
- Args:
105
- radius_min (float, optional): Minimum radius of the spherical shell. Defaults to
106
- 1.5.
107
- radius_max (float, optional): Maximum radius of the spherical shell. Defaults to
108
- 2.0.
109
- maxz (float, optional): Maximum z value of the spherical shell. Defaults to 1.6.
110
- minz (float, optional): Minimum z value of the spherical shell. Defaults to
111
- -0.75.
112
- only_northern_hemisphere (bool, optional): Whether to only sample points in the
113
- northern hemisphere. Defaults to False.
114
-
115
- Returns:
116
- bpy.types.Object: The camera object.
117
- """
118
-
119
- x, y, z = _sample_spherical(
120
- radius_min=radius_min, radius_max=radius_max, maxz=maxz, minz=minz
121
- )
122
- camera = bpy.data.objects["Camera"]
123
-
124
- # only positive z
125
- if only_northern_hemisphere:
126
- z = abs(z)
127
-
128
- camera.location = Vector(np.array([x, y, z]))
129
-
130
- direction = -camera.location
131
- rot_quat = direction.to_track_quat("-Z", "Y")
132
- camera.rotation_euler = rot_quat.to_euler()
133
-
134
- return camera
135
-
136
-
137
- def _set_camera_at_size(i: int, scale: float = 1.5) -> bpy.types.Object:
138
- """Debugging function to set the camera on the 6 faces of a cube.
139
-
140
- Args:
141
- i (int): Index of the face of the cube.
142
- scale (float, optional): Scale of the cube. Defaults to 1.5.
143
-
144
- Returns:
145
- bpy.types.Object: The camera object.
146
- """
147
- if i == 0:
148
- x, y, z = scale, 0, 0
149
- elif i == 1:
150
- x, y, z = -scale, 0, 0
151
- elif i == 2:
152
- x, y, z = 0, scale, 0
153
- elif i == 3:
154
- x, y, z = 0, -scale, 0
155
- elif i == 4:
156
- x, y, z = 0, 0, scale
157
- elif i == 5:
158
- x, y, z = 0, 0, -scale
159
- else:
160
- raise ValueError(f"Invalid index: i={i}, must be int in range [0, 5].")
161
- camera = bpy.data.objects["Camera"]
162
- camera.location = Vector(np.array([x, y, z]))
163
- direction = -camera.location
164
- rot_quat = direction.to_track_quat("-Z", "Y")
165
- camera.rotation_euler = rot_quat.to_euler()
166
- return camera
167
-
168
-
169
- def _create_light(
170
- name: str,
171
- light_type: Literal["POINT", "SUN", "SPOT", "AREA"],
172
- location: Tuple[float, float, float],
173
- rotation: Tuple[float, float, float],
174
- energy: float,
175
- use_shadow: bool = False,
176
- specular_factor: float = 1.0,
177
- ):
178
- """Creates a light object.
179
-
180
- Args:
181
- name (str): Name of the light object.
182
- light_type (Literal["POINT", "SUN", "SPOT", "AREA"]): Type of the light.
183
- location (Tuple[float, float, float]): Location of the light.
184
- rotation (Tuple[float, float, float]): Rotation of the light.
185
- energy (float): Energy of the light.
186
- use_shadow (bool, optional): Whether to use shadows. Defaults to False.
187
- specular_factor (float, optional): Specular factor of the light. Defaults to 1.0.
188
-
189
- Returns:
190
- bpy.types.Object: The light object.
191
- """
192
-
193
- light_data = bpy.data.lights.new(name=name, type=light_type)
194
- light_object = bpy.data.objects.new(name, light_data)
195
- bpy.context.collection.objects.link(light_object)
196
- light_object.location = location
197
- light_object.rotation_euler = rotation
198
- light_data.use_shadow = use_shadow
199
- light_data.specular_factor = specular_factor
200
- light_data.energy = energy
201
- return light_object
202
-
203
-
204
- def randomize_lighting() -> Dict[str, bpy.types.Object]:
205
- """Randomizes the lighting in the scene.
206
-
207
- Returns:
208
- Dict[str, bpy.types.Object]: Dictionary of the lights in the scene. The keys are
209
- "key_light", "fill_light", "rim_light", and "bottom_light".
210
- """
211
-
212
- # Clear existing lights
213
- bpy.ops.object.select_all(action="DESELECT")
214
- bpy.ops.object.select_by_type(type="LIGHT")
215
- bpy.ops.object.delete()
216
-
217
- # Create key light
218
- key_light = _create_light(
219
- name="Key_Light",
220
- light_type="SUN",
221
- location=(0, 0, 0),
222
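- # Euler angles are in radians (0.785398 rad = 45 degrees)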
- rotation=(0.785398, 0, -0.785398),
223
- energy=random.choice([3, 4, 5]),
224
- )
225
-
226
- # Create fill light
227
- fill_light = _create_light(
228
- name="Fill_Light",
229
- light_type="SUN",
230
- location=(0, 0, 0),
231
- rotation=(0.785398, 0, 2.35619),
232
- energy=random.choice([2, 3, 4]),
233
- )
234
-
235
- # Create rim light
236
- rim_light = _create_light(
237
- name="Rim_Light",
238
- light_type="SUN",
239
- location=(0, 0, 0),
240
- rotation=(-0.785398, 0, -3.92699),
241
- energy=random.choice([3, 4, 5]),
242
- )
243
-
244
- # Create bottom light
245
- bottom_light = _create_light(
246
- name="Bottom_Light",
247
- light_type="SUN",
248
- location=(0, 0, 0),
249
- rotation=(3.14159, 0, 0),
250
- energy=random.choice([1, 2, 3]),
251
- )
252
-
253
- return dict(
254
- key_light=key_light,
255
- fill_light=fill_light,
256
- rim_light=rim_light,
257
- bottom_light=bottom_light,
258
- )
259
-
260
-
261
- def reset_scene() -> None:
262
- """Resets the scene to a clean state.
263
-
264
- Returns:
265
- None
266
- """
267
- # delete everything that isn't part of a camera or a light
268
- for obj in bpy.data.objects:
269
- if obj.type not in {"CAMERA", "LIGHT"}:
270
- bpy.data.objects.remove(obj, do_unlink=True)
271
-
272
- # delete all the materials
273
- for material in bpy.data.materials:
274
- bpy.data.materials.remove(material, do_unlink=True)
275
-
276
- # delete all the textures
277
- for texture in bpy.data.textures:
278
- bpy.data.textures.remove(texture, do_unlink=True)
279
-
280
- # delete all the images
281
- for image in bpy.data.images:
282
- bpy.data.images.remove(image, do_unlink=True)
283
-
284
-
285
- def load_object(object_path: str) -> None:
286
- """Loads a model with a supported file extension into the scene.
287
-
288
- Args:
289
- object_path (str): Path to the model file.
290
-
291
- Raises:
292
- ValueError: If the file extension is not supported.
293
-
294
- Returns:
295
- None
296
- """
297
- file_extension = object_path.split(".")[-1].lower()
298
- if file_extension != "usdz" and file_extension not in IMPORT_FUNCTIONS:
299
- raise ValueError(f"Unsupported file type: {object_path}")
300
-
301
- if file_extension == "usdz":
302
- # install usdz io package
303
- dirname = os.path.dirname(os.path.realpath(__file__))
304
- usdz_package = os.path.join(dirname, "io_scene_usdz.zip")
305
- bpy.ops.preferences.addon_install(filepath=usdz_package)
306
- # enable it
307
- addon_name = "io_scene_usdz"
308
- bpy.ops.preferences.addon_enable(module=addon_name)
309
- # import the usdz
310
- from io_scene_usdz.import_usdz import import_usdz
311
-
312
- import_usdz(context, filepath=object_path, materials=True, animations=True)
313
- return None
314
-
315
- # load from existing import functions
316
- import_function = IMPORT_FUNCTIONS[file_extension]
317
-
318
- if file_extension == "blend":
319
- import_function(directory=object_path, link=False)
320
- elif file_extension in {"glb", "gltf"}:
321
- import_function(filepath=object_path, merge_vertices=True)
322
- else:
323
- import_function(filepath=object_path)
324
-
325
-
326
- def scene_bbox(
327
- single_obj: Optional[bpy.types.Object] = None, ignore_matrix: bool = False
328
- ) -> Tuple[Vector, Vector]:
329
- """Returns the bounding box of the scene.
330
-
331
- Taken from Shap-E rendering script
332
- (https://github.com/openai/shap-e/blob/main/shap_e/rendering/blender/blender_script.py#L68-L82)
333
-
334
- Args:
335
- single_obj (Optional[bpy.types.Object], optional): If not None, only computes
336
- the bounding box for the given object. Defaults to None.
337
- ignore_matrix (bool, optional): Whether to ignore the object's matrix. Defaults
338
- to False.
339
-
340
- Raises:
341
- RuntimeError: If there are no objects in the scene.
342
-
343
- Returns:
344
- Tuple[Vector, Vector]: The minimum and maximum coordinates of the bounding box.
345
- """
346
- bbox_min = (math.inf,) * 3
347
- bbox_max = (-math.inf,) * 3
348
- found = False
349
- for obj in get_scene_meshes() if single_obj is None else [single_obj]:
350
- found = True
351
- for coord in obj.bound_box:
352
- coord = Vector(coord)
353
- if not ignore_matrix:
354
- coord = obj.matrix_world @ coord
355
- bbox_min = tuple(min(x, y) for x, y in zip(bbox_min, coord))
356
- bbox_max = tuple(max(x, y) for x, y in zip(bbox_max, coord))
357
-
358
- if not found:
359
- raise RuntimeError("no objects in scene to compute bounding box for")
360
-
361
- return Vector(bbox_min), Vector(bbox_max)
362
-
363
-
364
- def get_scene_root_objects() -> Generator[bpy.types.Object, None, None]:
365
- """Returns all root objects in the scene.
366
-
367
- Yields:
368
- Generator[bpy.types.Object, None, None]: Generator of all root objects in the
369
- scene.
370
- """
371
- for obj in bpy.context.scene.objects.values():
372
- if not obj.parent:
373
- yield obj
374
-
375
-
376
- def get_scene_meshes() -> Generator[bpy.types.Object, None, None]:
377
- """Returns all meshes in the scene.
378
-
379
- Yields:
380
- Generator[bpy.types.Object, None, None]: Generator of all meshes in the scene.
381
- """
382
- for obj in bpy.context.scene.objects.values():
383
- if isinstance(obj.data, bpy.types.Mesh):
384
- yield obj
385
-
386
-
387
- def get_3x4_RT_matrix_from_blender(cam: bpy.types.Object) -> Matrix:
388
- """Returns the 3x4 RT matrix from the given camera.
389
-
390
- Taken from Zero123, which in turn was taken from
391
- https://github.com/panmari/stanford-shapenet-renderer/blob/master/render_blender.py
392
-
393
- Args:
394
- cam (bpy.types.Object): The camera object.
395
-
396
- Returns:
397
- Matrix: The 3x4 RT matrix from the given camera.
398
- """
399
- # Use matrix_world instead to account for all constraints
400
- location, rotation = cam.matrix_world.decompose()[0:2]
401
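- # world->camera rotation is the inverse (here, transpose) of the camera rotation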
- R_world2bcam = rotation.to_matrix().transposed()
402
-
403
- # Use location from matrix_world to account for constraints:
404
- T_world2bcam = -1 * R_world2bcam @ location
405
-
406
- # put into 3x4 matrix
407
- RT = Matrix(
408
- (
409
- R_world2bcam[0][:] + (T_world2bcam[0],),
410
- R_world2bcam[1][:] + (T_world2bcam[1],),
411
- R_world2bcam[2][:] + (T_world2bcam[2],),
412
- )
413
- )
414
- return RT
415
-
416
-
417
- def delete_invisible_objects() -> None:
418
- """Deletes all invisible objects in the scene.
419
-
420
- Returns:
421
- None
422
- """
423
- bpy.ops.object.select_all(action="DESELECT")
424
- for obj in scene.objects:
425
- if obj.hide_viewport or obj.hide_render:
426
- obj.hide_viewport = False
427
- obj.hide_render = False
428
- obj.hide_select = False
429
- obj.select_set(True)
430
- bpy.ops.object.delete()
431
-
432
- # Delete invisible collections
433
- invisible_collections = [col for col in bpy.data.collections if col.hide_viewport]
434
- for col in invisible_collections:
435
- bpy.data.collections.remove(col)
436
-
437
-
438
- def normalize_scene() -> None:
439
- """Normalizes the scene by scaling and translating it to fit in a unit cube centered
440
- at the origin.
441
-
442
- Mostly taken from the Point-E / Shap-E rendering script
443
- (https://github.com/openai/point-e/blob/main/point_e/evals/scripts/blender_script.py#L97-L112),
444
- but with a fix for multiple root objects (see the bug report here:
445
- https://github.com/openai/shap-e/pull/60).
446
-
447
- Returns:
448
- None
449
- """
450
- if len(list(get_scene_root_objects())) > 1:
451
- # create an empty object to be used as a parent for all root objects
452
- parent_empty = bpy.data.objects.new("ParentEmpty", None)
453
- bpy.context.scene.collection.objects.link(parent_empty)
454
-
455
- # parent all root objects to the empty object
456
- for obj in get_scene_root_objects():
457
- if obj != parent_empty:
458
- obj.parent = parent_empty
459
-
460
- bbox_min, bbox_max = scene_bbox()
461
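- # uniform scale so the longest side of the scene bounding box becomes length 1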
- scale = 1 / max(bbox_max - bbox_min)
462
- for obj in get_scene_root_objects():
463
- obj.scale = obj.scale * scale
464
-
465
- # Apply scale to matrix_world.
466
- bpy.context.view_layer.update()
467
- bbox_min, bbox_max = scene_bbox()
468
- offset = -(bbox_min + bbox_max) / 2
469
- for obj in get_scene_root_objects():
470
- obj.matrix_world.translation += offset
471
- bpy.ops.object.select_all(action="DESELECT")
472
-
473
- # unparent the camera
474
- bpy.data.objects["Camera"].parent = None
475
-
476
-
477
- def delete_missing_textures() -> Dict[str, Any]:
478
- """Deletes all missing textures in the scene.
479
-
480
- Returns:
481
- Dict[str, Any]: Dictionary with keys "count", "files", and "file_path_to_color".
482
- "count" is the number of missing textures, "files" is a list of the missing
483
- texture file paths, and "file_path_to_color" is a dictionary mapping the
484
- missing texture file paths to a random color.
485
- """
486
- missing_file_count = 0
487
- out_files = []
488
- file_path_to_color = {}
489
-
490
- # Check all materials in the scene
491
- for material in bpy.data.materials:
492
- if material.use_nodes:
493
- for node in material.node_tree.nodes:
494
- if node.type == "TEX_IMAGE":
495
- image = node.image
496
- if image is not None:
497
- file_path = bpy.path.abspath(image.filepath)
498
- if file_path == "":
499
- # means it's embedded
500
- continue
501
-
502
- if not os.path.exists(file_path):
503
- # Find the connected Principled BSDF node
504
- connected_node = node.outputs[0].links[0].to_node
505
-
506
- if connected_node.type == "BSDF_PRINCIPLED":
507
- if file_path not in file_path_to_color:
508
- # Set a random color for the unique missing file path
509
- random_color = [random.random() for _ in range(3)]
510
- file_path_to_color[file_path] = random_color + [1]
511
-
512
- connected_node.inputs[
513
- "Base Color"
514
- ].default_value = file_path_to_color[file_path]
515
-
516
- # Delete the TEX_IMAGE node
517
- material.node_tree.nodes.remove(node)
518
- missing_file_count += 1
519
- out_files.append(image.filepath)
520
- return {
521
- "count": missing_file_count,
522
- "files": out_files,
523
- "file_path_to_color": file_path_to_color,
524
- }
525
-
526
-
527
- def _get_random_color() -> Tuple[float, float, float, float]:
528
- """Generates a random RGB-A color.
529
-
530
- The alpha value is always 1.
531
-
532
- Returns:
533
- Tuple[float, float, float, float]: A random RGB-A color. Each value is in the
534
- range [0, 1].
535
- """
536
- return (random.random(), random.random(), random.random(), 1)
537
-
538
-
539
- def _apply_color_to_object(
540
- obj: bpy.types.Object, color: Tuple[float, float, float, float]
541
- ) -> None:
542
- """Applies the given color to the object.
543
-
544
- Args:
545
- obj (bpy.types.Object): The object to apply the color to.
546
- color (Tuple[float, float, float, float]): The color to apply to the object.
547
-
548
- Returns:
549
- None
550
- """
551
- mat = bpy.data.materials.new(name=f"RandomMaterial_{obj.name}")
552
- mat.use_nodes = True
553
- nodes = mat.node_tree.nodes
554
- principled_bsdf = nodes.get("Principled BSDF")
555
- if principled_bsdf:
556
- principled_bsdf.inputs["Base Color"].default_value = color
557
- obj.data.materials.append(mat)
558
-
559
-
560
- def apply_single_random_color_to_all_objects() -> Tuple[float, float, float, float]:
561
- """Applies a single random color to all objects in the scene.
562
-
563
- Returns:
564
- Tuple[float, float, float, float]: The random color that was applied to all
565
- objects.
566
- """
567
- rand_color = _get_random_color()
568
- for obj in bpy.context.scene.objects:
569
- if obj.type == "MESH":
570
- _apply_color_to_object(obj, rand_color)
571
- return rand_color
572
-
573
-
574
- class MetadataExtractor:
575
- """Class to extract metadata from a Blender scene."""
576
-
577
- def __init__(
578
- self, object_path: str, scene: bpy.types.Scene, bdata: bpy.types.BlendData
579
- ) -> None:
580
- """Initializes the MetadataExtractor.
581
-
582
- Args:
583
- object_path (str): Path to the object file.
584
- scene (bpy.types.Scene): The current scene object from `bpy.context.scene`.
585
- bdata (bpy.types.BlendData): The current blender data from `bpy.data`.
586
-
587
- Returns:
588
- None
589
- """
590
- self.object_path = object_path
591
- self.scene = scene
592
- self.bdata = bdata
593
-
594
- def get_poly_count(self) -> int:
595
- """Returns the total number of polygons in the scene."""
596
- total_poly_count = 0
597
- for obj in self.scene.objects:
598
- if obj.type == "MESH":
599
- total_poly_count += len(obj.data.polygons)
600
- return total_poly_count
601
-
602
- def get_vertex_count(self) -> int:
603
- """Returns the total number of vertices in the scene."""
604
- total_vertex_count = 0
605
- for obj in self.scene.objects:
606
- if obj.type == "MESH":
607
- total_vertex_count += len(obj.data.vertices)
608
- return total_vertex_count
609
-
610
- def get_edge_count(self) -> int:
611
- """Returns the total number of edges in the scene."""
612
- total_edge_count = 0
613
- for obj in self.scene.objects:
614
- if obj.type == "MESH":
615
- total_edge_count += len(obj.data.edges)
616
- return total_edge_count
617
-
618
- def get_lamp_count(self) -> int:
619
- """Returns the number of lamps in the scene."""
620
- return sum(1 for obj in self.scene.objects if obj.type == "LIGHT")
621
-
622
- def get_mesh_count(self) -> int:
623
- """Returns the number of meshes in the scene."""
624
- return sum(1 for obj in self.scene.objects if obj.type == "MESH")
625
-
626
- def get_material_count(self) -> int:
627
- """Returns the number of materials in the scene."""
628
- return len(self.bdata.materials)
629
-
630
- def get_object_count(self) -> int:
631
- """Returns the number of objects in the scene."""
632
- return len(self.bdata.objects)
633
-
634
- def get_animation_count(self) -> int:
635
- """Returns the number of animations in the scene."""
636
- return len(self.bdata.actions)
637
-
638
- def get_linked_files(self) -> List[str]:
639
- """Returns the filepaths of all linked files."""
640
- image_filepaths = self._get_image_filepaths()
641
- material_filepaths = self._get_material_filepaths()
642
- linked_libraries_filepaths = self._get_linked_libraries_filepaths()
643
-
644
- all_filepaths = (
645
- image_filepaths | material_filepaths | linked_libraries_filepaths
646
- )
647
- if "" in all_filepaths:
648
- all_filepaths.remove("")
649
- return list(all_filepaths)
650
-
651
- def _get_image_filepaths(self) -> Set[str]:
652
- """Returns the filepaths of all images used in the scene."""
653
- filepaths = set()
654
- for image in self.bdata.images:
655
- if image.source == "FILE":
656
- filepaths.add(bpy.path.abspath(image.filepath))
657
- return filepaths
658
-
659
- def _get_material_filepaths(self) -> Set[str]:
660
- """Returns the filepaths of all images used in materials."""
661
- filepaths = set()
662
- for material in self.bdata.materials:
663
- if material.use_nodes:
664
- for node in material.node_tree.nodes:
665
- if node.type == "TEX_IMAGE":
666
- image = node.image
667
- if image is not None:
668
- filepaths.add(bpy.path.abspath(image.filepath))
669
- return filepaths
670
-
671
- def _get_linked_libraries_filepaths(self) -> Set[str]:
672
- """Returns the filepaths of all linked libraries."""
673
- filepaths = set()
674
- for library in self.bdata.libraries:
675
- filepaths.add(bpy.path.abspath(library.filepath))
676
- return filepaths
677
-
678
- def get_scene_size(self) -> Dict[str, list]:
679
- """Returns the size of the scene bounds in meters."""
680
- bbox_min, bbox_max = scene_bbox()
681
- return {"bbox_max": list(bbox_max), "bbox_min": list(bbox_min)}
682
-
683
- def get_shape_key_count(self) -> int:
684
- """Returns the number of shape keys in the scene."""
685
- total_shape_key_count = 0
686
- for obj in self.scene.objects:
687
- if obj.type == "MESH":
688
- shape_keys = obj.data.shape_keys
689
- if shape_keys is not None:
690
- total_shape_key_count += (
691
- len(shape_keys.key_blocks) - 1
692
- ) # Subtract 1 to exclude the Basis shape key
693
- return total_shape_key_count
694
-
695
- def get_armature_count(self) -> int:
696
- """Returns the number of armatures in the scene."""
697
- total_armature_count = 0
698
- for obj in self.scene.objects:
699
- if obj.type == "ARMATURE":
700
- total_armature_count += 1
701
- return total_armature_count
702
-
703
- def read_file_size(self) -> int:
704
- """Returns the size of the file in bytes."""
705
- return os.path.getsize(self.object_path)
706
-
707
- def get_metadata(self) -> Dict[str, Any]:
708
- """Returns the metadata of the scene.
709
-
710
- Returns:
711
- Dict[str, Any]: Dictionary of the metadata with keys for "file_size",
712
- "poly_count", "vert_count", "edge_count", "material_count", "object_count",
713
- "lamp_count", "mesh_count", "animation_count", "linked_files", "scene_size",
714
- "shape_key_count", and "armature_count".
715
- """
716
- return {
717
- "file_size": self.read_file_size(),
718
- "poly_count": self.get_poly_count(),
719
- "vert_count": self.get_vertex_count(),
720
- "edge_count": self.get_edge_count(),
721
- "material_count": self.get_material_count(),
722
- "object_count": self.get_object_count(),
723
- "lamp_count": self.get_lamp_count(),
724
- "mesh_count": self.get_mesh_count(),
725
- "animation_count": self.get_animation_count(),
726
- "linked_files": self.get_linked_files(),
727
- "scene_size": self.get_scene_size(),
728
- "shape_key_count": self.get_shape_key_count(),
729
- "armature_count": self.get_armature_count(),
730
- }
731
-
732
-
733
- def render_object(
734
- object_file: str,
735
- num_renders: int,
736
- only_northern_hemisphere: bool,
737
- output_dir: str,
738
- ) -> None:
739
- """Saves rendered images with its camera matrix and metadata of the object.
740
-
741
- Args:
742
- object_file (str): Path to the object file.
743
- num_renders (int): Number of renders to save of the object.
744
- only_northern_hemisphere (bool): Whether to only render sides of the object that
745
- are in the northern hemisphere. This is useful for rendering objects that
746
- are photogrammetrically scanned, as the bottom of the object often has
747
- holes.
748
- output_dir (str): Path to the directory where the rendered images and metadata
749
- will be saved.
750
-
751
- Returns:
752
- None
753
- """
754
- os.makedirs(output_dir, exist_ok=True)
755
-
756
- # load the object
757
- if object_file.endswith(".blend"):
758
- bpy.ops.object.mode_set(mode="OBJECT")
759
- reset_cameras()
760
- delete_invisible_objects()
761
- else:
762
- reset_scene()
763
- load_object(object_file)
764
-
765
- # Set up cameras
766
- cam = scene.objects["Camera"]
767
- cam.data.lens = 35
768
- cam.data.sensor_width = 32
769
-
770
- # Set up camera constraints
771
- cam_constraint = cam.constraints.new(type="TRACK_TO")
772
- cam_constraint.track_axis = "TRACK_NEGATIVE_Z"
773
- cam_constraint.up_axis = "UP_Y"
774
- empty = bpy.data.objects.new("Empty", None)
775
- scene.collection.objects.link(empty)
776
- cam_constraint.target = empty
777
-
778
- # Extract the metadata. This must be done before normalizing the scene to get
779
- # accurate bounding box information.
780
- metadata_extractor = MetadataExtractor(
781
- object_path=object_file, scene=scene, bdata=bpy.data
782
- )
783
- metadata = metadata_extractor.get_metadata()
784
-
785
- # delete all objects that are not meshes
786
- if object_file.lower().endswith(".usdz"):
787
- # don't delete missing textures on usdz files, lots of them are embedded
788
- missing_textures = None
789
- else:
790
- missing_textures = delete_missing_textures()
791
- metadata["missing_textures"] = missing_textures
792
-
793
- # possibly apply a random color to all objects
794
- if object_file.endswith(".stl") or object_file.endswith(".ply"):
795
- assert len(bpy.context.selected_objects) == 1
796
- rand_color = apply_single_random_color_to_all_objects()
797
- metadata["random_color"] = rand_color
798
- else:
799
- metadata["random_color"] = None
800
-
801
- # save metadata
802
- metadata_path = os.path.join(output_dir, "metadata.json")
803
- os.makedirs(os.path.dirname(metadata_path), exist_ok=True)
804
- with open(metadata_path, "w", encoding="utf-8") as f:
805
- json.dump(metadata, f, sort_keys=True, indent=2)
806
-
807
- # normalize the scene
808
- normalize_scene()
809
-
810
- # randomize the lighting
811
- randomize_lighting()
812
-
813
- # render the images
814
- for i in range(num_renders):
815
- # set camera
816
- camera = randomize_camera(
817
- only_northern_hemisphere=only_northern_hemisphere,
818
- )
819
-
820
- # render the image
821
- render_path = os.path.join(output_dir, f"{i:03d}.png")
822
- scene.render.filepath = render_path
823
- bpy.ops.render.render(write_still=True)
824
-
825
- # save camera RT matrix
826
- rt_matrix = get_3x4_RT_matrix_from_blender(camera)
827
- rt_matrix_path = os.path.join(output_dir, f"{i:03d}.npy")
828
- np.save(rt_matrix_path, rt_matrix)
829
-
830
-
831
- if __name__ == "__main__":
832
- parser = argparse.ArgumentParser()
833
- parser.add_argument(
834
- "--object_path",
835
- type=str,
836
- required=True,
837
- help="Path to the object file",
838
- )
839
- parser.add_argument(
840
- "--output_dir",
841
- type=str,
842
- required=True,
843
- help="Path to the directory where the rendered images and metadata will be saved.",
844
- )
845
- parser.add_argument(
846
- "--engine",
847
- type=str,
848
- default="BLENDER_EEVEE",
849
- choices=["CYCLES", "BLENDER_EEVEE"],
850
- )
851
- parser.add_argument(
852
- "--only_northern_hemisphere",
853
- action="store_true",
854
- help="Only render the northern hemisphere of the object.",
855
- default=False,
856
- )
857
- parser.add_argument(
858
- "--num_renders",
859
- type=int,
860
- default=12,
861
- help="Number of renders to save of the object.",
862
- )
863
- argv = sys.argv[sys.argv.index("--") + 1 :]
864
- args = parser.parse_args(argv)
865
-
866
- context = bpy.context
867
- scene = context.scene
868
- render = scene.render
869
-
870
- # Set render settings
871
- render.engine = args.engine
872
- render.image_settings.file_format = "PNG"
873
- render.image_settings.color_mode = "RGBA"
874
- render.resolution_x = 512
875
- render.resolution_y = 512
876
- render.resolution_percentage = 100
877
-
878
- # Set cycles settings
879
- scene.cycles.device = "GPU"
880
- scene.cycles.samples = 128
881
- scene.cycles.diffuse_bounces = 1
882
- scene.cycles.glossy_bounces = 1
883
- scene.cycles.transparent_max_bounces = 3
884
- scene.cycles.transmission_bounces = 3
885
- scene.cycles.filter_width = 0.01
886
- scene.cycles.use_denoising = True
887
- scene.render.film_transparent = True
888
- bpy.context.preferences.addons["cycles"].preferences.get_devices()
889
- bpy.context.preferences.addons[
890
- "cycles"
891
- ].preferences.compute_device_type = "CUDA" # or "OPENCL"
892
-
893
- # Render the images
894
- render_object(
895
- object_file=args.object_path,
896
- num_renders=args.num_renders,
897
- only_northern_hemisphere=args.only_northern_hemisphere,
898
- output_dir=args.output_dir,
899
- )
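
A note on how the script above is run: it is not imported as a module but executed by a headless Blender, with everything after the "--" separator forwarded to its argparse parser. A minimal invocation sketch (the binary name, object file, and output directory are illustrative, not fixed by the script):

    import subprocess

    # Mirrors the command assembled in scripts/rendering/main.py; assumes a
    # Blender 3.2.x binary on PATH as `blender` and blender_script.py in the
    # working directory.
    command = (
        "blender --background --python blender_script.py -- "
        "--object_path example.glb --output_dir renders/ "
        "--engine CYCLES --num_renders 12"
    )
    subprocess.run(["bash", "-c", command], check=True)
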
scripts/rendering/example-objects.json DELETED
@@ -1,52 +0,0 @@
1
- [
2
- {
3
- "sha256": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
4
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.obj",
5
- "source": "github"
6
- },
7
- {
8
- "sha256": "f15f7541614940dedd1ce373dee9cde1cc63471db84081259a44488aca267408",
9
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example-linked.gltf",
10
- "source": "github"
11
- },
12
- {
13
- "sha256": "cd567dfd1605a5fd60af1b8ac67e44b6af25b9e3f160da66047bb1187d02a071",
14
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.dae",
15
- "source": "github"
16
- },
17
- {
18
- "sha256": "879bc9d2d85e4f3866f0cfef41f5236f9fff5f973380461af9f69cdbed53a0da",
19
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.abc",
20
- "source": "github"
21
- },
22
- {
23
- "sha256": "ac69da6df0c83b593902c71238a7721f575233723730150230da95d583466562",
24
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.blend",
25
- "source": "github"
26
- },
27
- {
28
- "sha256": "bc4cc8a78bc57d3c41ddaca9135d2b4e66b59e97846e2b796e5cc6b09c62b273",
29
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example-embedded.gltf",
30
- "source": "github"
31
- },
32
- {
33
- "sha256": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
34
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.glb",
35
- "source": "github"
36
- },
37
- {
38
- "sha256": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
39
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.fbx",
40
- "source": "github"
41
- },
42
- {
43
- "sha256": "10fd5944c3fe32cb25fdcf46319cbda250b98bd22fd87fc41de5769b118d5262",
44
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.stl",
45
- "source": "github"
46
- },
47
- {
48
- "sha256": "aa5c41753ad8a91562122d8cafbe1a940146fc3d227b7086eb8c830da36cb42b",
49
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/ead0bed6a76012452273bbe18d12e4d68a881956/example.ply",
50
- "source": "github"
51
- }
52
- ]
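
These records are what get_example_objects() in scripts/rendering/main.py feeds to the downloader, each one pinning a test object by content hash. Loading them is a one-liner (the column names come straight from the JSON keys above):

    import pandas as pd

    # Same call that get_example_objects() makes in scripts/rendering/main.py.
    objects = pd.read_json("example-objects.json", orient="records")
    print(objects[["sha256", "fileIdentifier", "source"]].head())
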
scripts/rendering/main.py DELETED
@@ -1,414 +0,0 @@
1
- import glob
2
- import json
3
- import multiprocessing
4
- import os
5
- import platform
6
- import random
7
- import subprocess
8
- import tempfile
9
- import time
10
- import zipfile
11
- from functools import partial
12
- from typing import Any, Dict, List, Literal, Optional, Union
13
-
14
- import fire
15
- import fsspec
16
- import GPUtil
17
- import pandas as pd
18
- from loguru import logger
19
-
20
- from objaverse_xl.objaverse_xl_downloader import ObjaverseXLDownloader
21
- from objaverse_xl.utils import get_uid_from_str
22
-
23
-
24
- def log_processed_object(csv_filename: str, *args) -> None:
25
- """Log when an object is done being used.
26
-
27
- Args:
28
- csv_filename (str): Name of the CSV file to save the logs to.
29
- *args: Arguments to save to the CSV file.
30
-
31
- Returns:
32
- None
33
- """
34
- args = ",".join([str(arg) for arg in args])
35
- # log that this object was rendered successfully
36
- # saving locally to avoid excessive writes to the cloud
37
- dirname = os.path.expanduser("~/.objaverse/github/logs/")
38
- os.makedirs(dirname, exist_ok=True)
39
- with open(os.path.join(dirname, csv_filename), "a", encoding="utf-8") as f:
40
- f.write(f"{time.time()},{args}\n")
41
-
42
-
43
- def zipdir(path: str, ziph: zipfile.ZipFile) -> None:
44
- """Zip up a directory with an arcname structure.
45
-
46
- Args:
47
- path (str): Path to the directory to zip.
48
- ziph (zipfile.ZipFile): ZipFile handler object to write to.
49
-
50
- Returns:
51
- None
52
- """
53
- # ziph is zipfile handle
54
- for root, dirs, files in os.walk(path):
55
- for file in files:
56
- # this ensures the structure inside the zip starts at folder/
57
- arcname = os.path.join(os.path.basename(root), file)
58
- ziph.write(os.path.join(root, file), arcname=arcname)
59
-
60
-
61
- def handle_found_object(
62
- local_path: str,
63
- file_identifier: str,
64
- sha256: str,
65
- metadata: Dict[str, Any],
66
- num_renders: int,
67
- render_dir: str,
68
- only_northern_hemisphere: bool,
69
- gpu_devices: Union[int, List[int]],
70
- render_timeout: int,
71
- successful_log_file: Optional[str] = "handle-found-object-successful.csv",
72
- failed_log_file: Optional[str] = "handle-found-object-failed.csv",
73
- ) -> bool:
74
- """Called when an object is successfully found and downloaded.
75
-
76
- Here, the object has the same sha256 as the one that was downloaded with
77
- Objaverse-XL. If this callback is passed as None, the object is still downloaded,
78
- but nothing is done with it.
79
-
80
- Args:
81
- local_path (str): Local path to the downloaded 3D object.
82
- file_identifier (str): GitHub URL of the 3D object.
83
- sha256 (str): SHA256 of the contents of the 3D object.
84
- metadata (Dict[str, Any]): Metadata about the 3D object, including keys for
85
- `github_organization` and `github_repo`.
86
- num_renders (int): Number of renders to save of the object.
87
- render_dir (str): Directory where the objects will be rendered.
88
- only_northern_hemisphere (bool): Only render the northern hemisphere of the
89
- object.
90
- gpu_devices (Union[int, List[int]]): GPU device(s) to use for rendering. If
91
- an int, the GPU device will be randomly selected from 0 to gpu_devices - 1.
92
- If a list, the GPU device will be randomly selected from the list.
93
- If 0, the CPU will be used for rendering.
94
- render_timeout (int): Number of seconds to wait for the rendering job to
95
- complete.
96
- successful_log_file (str): Name of the log file to save successful renders to.
97
- failed_log_file (str): Name of the log file to save failed renders to.
98
-
99
- Returns: True if the object was rendered successfully, False otherwise.
100
- """
101
- save_uid = get_uid_from_str(file_identifier)
102
- args = f"--object_path '{local_path}' --num_renders {num_renders}"
103
-
104
- # get the GPU to use for rendering
105
- using_gpu: bool = True
106
- gpu_i = 0
107
- if isinstance(gpu_devices, int) and gpu_devices > 0:
108
- num_gpus = gpu_devices
109
- gpu_i = random.randint(0, num_gpus - 1)
110
- elif isinstance(gpu_devices, list):
111
- gpu_i = random.choice(gpu_devices)
112
- elif isinstance(gpu_devices, int) and gpu_devices == 0:
113
- using_gpu = False
114
- else:
115
- raise ValueError(
116
- f"gpu_devices must be an int > 0, 0, or a list of ints. Got {gpu_devices}."
117
- )
118
-
119
- with tempfile.TemporaryDirectory() as temp_dir:
120
- # get the target directory for the rendering job
121
- target_directory = os.path.join(temp_dir, save_uid)
122
- os.makedirs(target_directory, exist_ok=True)
123
- args += f" --output_dir {target_directory}"
124
-
125
- # check for Linux / Ubuntu or MacOS
126
- if platform.system() == "Linux" and using_gpu:
127
- args += " --engine BLENDER_EEVEE"
128
- elif platform.system() == "Darwin" or (
129
- platform.system() == "Linux" and not using_gpu
130
- ):
131
- # As far as I know, MacOS does not support BLENER_EEVEE, which uses GPU
132
- # rendering. Generally, I'd only recommend using MacOS for debugging and
133
- # small rendering jobs, since CYCLES is much slower than BLENDER_EEVEE.
134
- args += " --engine CYCLES"
135
- else:
136
- raise NotImplementedError(f"Platform {platform.system()} is not supported.")
137
-
138
- # check if we should only render the northern hemisphere
139
- if only_northern_hemisphere:
140
- args += " --only_northern_hemisphere"
141
-
142
- # get the command to run
143
- command = f"blender-3.2.2-linux-x64/blender --background --python blender_script.py -- {args}"
144
- if using_gpu:
145
- command = f"export DISPLAY=:0.{gpu_i} && {command}"
146
-
147
- # render the object (put in dev null)
148
- subprocess.run(
149
- ["bash", "-c", command],
150
- timeout=render_timeout,
151
- check=False,
152
- stdout=subprocess.DEVNULL,
153
- stderr=subprocess.DEVNULL,
154
- )
155
-
156
- # check that the renders were saved successfully
157
- png_files = glob.glob(os.path.join(target_directory, "*.png"))
158
- metadata_files = glob.glob(os.path.join(target_directory, "*.json"))
159
- npy_files = glob.glob(os.path.join(target_directory, "*.npy"))
160
- if (
161
- (len(png_files) != num_renders)
162
- or (len(npy_files) != num_renders)
163
- or (len(metadata_files) != 1)
164
- ):
165
- logger.error(
166
- f"Found object {file_identifier} was not rendered successfully!"
167
- )
168
- if failed_log_file is not None:
169
- log_processed_object(
170
- failed_log_file,
171
- file_identifier,
172
- sha256,
173
- )
174
- return False
175
-
176
- # update the metadata
177
- metadata_path = os.path.join(target_directory, "metadata.json")
178
- with open(metadata_path, "r", encoding="utf-8") as f:
179
- metadata_file = json.load(f)
180
- metadata_file["sha256"] = sha256
181
- metadata_file["file_identifier"] = file_identifier
182
- metadata_file["save_uid"] = save_uid
183
- metadata_file["metadata"] = metadata
184
- with open(metadata_path, "w", encoding="utf-8") as f:
185
- json.dump(metadata, f, indent=2, sort_keys=True)
186
-
187
- # Make a zip of the target_directory.
188
- # Keeps the {save_uid} directory structure when unzipped
189
- with zipfile.ZipFile(
190
- f"{target_directory}.zip", "w", zipfile.ZIP_DEFLATED
191
- ) as ziph:
192
- zipdir(target_directory, ziph)
193
-
194
- # move the zip to the render_dir
195
- fs, path = fsspec.core.url_to_fs(render_dir)
196
-
197
- # move the zip to the render_dir
198
- fs.makedirs(os.path.join(path, "github", "renders"), exist_ok=True)
199
- fs.put(
200
- os.path.join(f"{target_directory}.zip"),
201
- os.path.join(path, "github", "renders", f"{save_uid}.zip"),
202
- )
203
-
204
- # log that this object was rendered successfully
205
- if successful_log_file is not None:
206
- log_processed_object(successful_log_file, file_identifier, sha256)
207
-
208
- return True
209
-
210
-
211
- def handle_new_object(
212
- local_path: str,
213
- file_identifier: str,
214
- sha256: str,
215
- metadata: Dict[str, Any],
216
- log_file: str = "handle-new-object.csv",
217
- ) -> None:
218
- """Called when a new object is found.
219
-
220
- Here, the object is not used in Objaverse-XL, but is still downloaded with the
221
- repository. The object may have not been used because it does not successfully
222
- import into Blender. If None, the object will be downloaded, but nothing will be
223
- done with it.
224
-
225
- Args:
226
- local_path (str): Local path to the downloaded 3D object.
227
- file_identifier (str): GitHub URL of the 3D object.
228
- sha256 (str): SHA256 of the contents of the 3D object.
229
- metadata (Dict[str, Any]): Metadata about the 3D object, including the GitHub
230
- organization and repo names.
231
- log_file (str): Name of the log file to save the handle_new_object logs to.
232
-
233
- Returns:
234
- None
235
- """
236
- # log the new object
237
- log_processed_object(log_file, file_identifier, sha256)
238
-
239
-
240
- def handle_modified_object(
241
- local_path: str,
242
- file_identifier: str,
243
- new_sha256: str,
244
- old_sha256: str,
245
- metadata: Dict[str, Any],
246
- num_renders: int,
247
- render_dir: str,
248
- only_northern_hemisphere: bool,
249
- gpu_devices: Union[int, List[int]],
250
- render_timeout: int,
251
- ) -> None:
252
- """Called when a modified object is found and downloaded.
253
-
254
- Here, the object is successfully downloaded, but it has a different sha256 than the
255
- one that was downloaded with Objaverse-XL. This is not expected to happen very
256
- often, because the same commit hash is used for each repo. If None, the object will
257
- be downloaded, but nothing will be done with it.
258
-
259
- Args:
260
- local_path (str): Local path to the downloaded 3D object.
261
- file_identifier (str): GitHub URL of the 3D object.
262
- new_sha256 (str): SHA256 of the contents of the newly downloaded 3D object.
263
- old_sha256 (str): Expected SHA256 of the contents of the 3D object as it was
264
- when it was downloaded with Objaverse-XL.
265
- metadata (Dict[str, Any]): Metadata about the 3D object, including the GitHub
266
- organization and repo names.
267
- num_renders (int): Number of renders to save of the object.
268
- render_dir (str): Directory where the objects will be rendered.
269
- only_northern_hemisphere (bool): Only render the northern hemisphere of the
270
- object.
271
- gpu_devices (Union[int, List[int]]): GPU device(s) to use for rendering. If
272
- an int, the GPU device will be randomly selected from 0 to gpu_devices - 1.
273
- If a list, the GPU device will be randomly selected from the list.
274
- If 0, the CPU will be used for rendering.
275
- render_timeout (int): Number of seconds to wait for the rendering job to
276
- complete.
277
-
278
- Returns:
279
- None
280
- """
281
- success = handle_found_object(
282
- local_path=local_path,
283
- file_identifier=file_identifier,
284
- sha256=new_sha256,
285
- metadata=metadata,
286
- num_renders=num_renders,
287
- render_dir=render_dir,
288
- only_northern_hemisphere=only_northern_hemisphere,
289
- gpu_devices=gpu_devices,
290
- render_timeout=render_timeout,
291
- successful_log_file=None,
292
- failed_log_file=None,
293
- )
294
-
295
- if success:
296
- log_processed_object(
297
- "handle-modified-object-successful.csv",
298
- file_identifier,
299
- old_sha256,
300
- new_sha256,
301
- )
302
- else:
303
- log_processed_object(
304
- "handle-modified-object-failed.csv",
305
- file_identifier,
306
- old_sha256,
307
- new_sha256,
308
- )
309
-
310
-
311
- def handle_missing_object(
312
- github_url: str,
313
- sha256: str,
314
- metadata: Dict[str, Any],
315
- log_file: str = "handle-missing-object.csv",
316
- ) -> None:
317
- """Called when an object that is in Objaverse-XL is not found.
318
-
319
- Here, it is likely that the repository was deleted or renamed. If None, nothing
320
- will be done with the missing object.
321
-
322
- Args:
323
- github_url (str): GitHub URL of the 3D object.
324
- sha256 (str): SHA256 of the contents of the original 3D object.
325
- metadata (Dict[str, Any]): Metadata about the 3D object, including the GitHub
326
- organization and repo names.
327
- log_file (str): Name of the log file to save missing renders to.
328
-
329
- Returns:
330
- None
331
- """
332
- # log the missing object
333
- log_processed_object(log_file, github_url, sha256)
334
-
335
-
336
- def get_example_objects() -> pd.DataFrame:
337
- """Returns a DataFrame of example objects to use for debugging."""
338
- return pd.read_json("example-objects.json", orient="records")
339
-
340
-
341
- def render_objects(
342
- render_dir: str = "~/.objaverse",
343
- num_renders: int = 12,
344
- processes: Optional[int] = None,
345
- save_repo_format: Optional[Literal["zip", "tar", "tar.gz"]] = None,
346
- only_northern_hemisphere: bool = False,
347
- render_timeout: int = 300,
348
- gpu_devices: Optional[Union[int, List[int]]] = None,
349
- ) -> None:
350
- """Renders all GitHub objects in the Objaverse-XL dataset.
351
-
352
- Args:
353
- render_dir (str): Directory where the objects will be rendered.
354
- num_renders (int): Number of renders to save of the object.
355
- processes (Optional[int]): Number of processes to use for downloading the
356
- objects. If None, defaults to multiprocessing.cpu_count() * 3.
357
- save_repo_format (Optional[Literal["zip", "tar", "tar.gz"]]): If not None,
358
- the GitHub repo will be saved in this format under render_dir; if None, the repo is deleted after its objects are rendered.
359
- only_northern_hemisphere (bool): Only render the northern hemisphere of the
360
- object. Useful for rendering objects that are obtained from photogrammetry,
361
- since the southern hemisphere often has holes.
362
- render_timeout (int): Number of seconds to wait for the rendering job to
363
- complete.
364
- gpu_devices (Optional[Union[int, List[int]]]): GPU device(s) to use for
365
- rendering. If an int, the GPU device will be randomly selected from 0 to
366
- gpu_devices - 1. If a list, the GPU device will be randomly selected from
367
- the list. If 0, the CPU will be used for rendering. If None, defaults to
368
- use all available GPUs.
369
-
370
- Returns:
371
- None
372
- """
373
- if platform.system() not in ["Linux", "Darwin"]:
374
- raise NotImplementedError(
375
- f"Platform {platform.system()} is not supported. Use Linux or MacOS."
376
- )
377
-
378
- # get the gpu devices to use
379
- parsed_gpu_devices: Union[int, List[int]] = 0
380
- if gpu_devices is None:
381
- parsed_gpu_devices = len(GPUtil.getGPUs())
382
-
383
- if processes is None:
384
- processes = multiprocessing.cpu_count() * 3
385
-
386
- objects = get_example_objects()
387
- objaverse_xl_downloader = ObjaverseXLDownloader()
388
- objaverse_xl_downloader.download_objects(
389
- objects=objects,
390
- processes=processes,
391
- save_repo_format=save_repo_format,
392
- download_dir=render_dir, # only used when save_repo_format is not None
393
- handle_found_object=partial(
394
- handle_found_object,
395
- render_dir=render_dir,
396
- num_renders=num_renders,
397
- only_northern_hemisphere=only_northern_hemisphere,
398
- gpu_devices=parsed_gpu_devices,
399
- render_timeout=render_timeout,
400
- ),
401
- handle_new_object=handle_new_object,
402
- handle_modified_object=partial(
403
- handle_modified_object,
404
- render_dir=render_dir,
405
- num_renders=num_renders,
406
- gpu_devices=parsed_gpu_devices,
407
- only_northern_hemisphere=only_northern_hemisphere,
408
- ),
409
- handle_missing_object=handle_missing_object,
410
- )
411
-
412
-
413
- if __name__ == "__main__":
414
- fire.Fire(render_objects)
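
Because render_objects is exposed through fire.Fire, its keyword arguments double as command-line flags (python3 main.py --num_renders 12 ...). Calling it directly is equivalent; the values below are the defaults made explicit, except gpu_devices, which assumes a single-GPU machine:

    from main import render_objects

    # Equivalent to: python3 main.py --num_renders 12 --gpu_devices 1
    render_objects(
        render_dir="~/.objaverse",
        num_renders=12,
        gpu_devices=1,  # render on GPU 0; pass 0 to force CPU rendering
        render_timeout=300,
    )
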
scripts/rendering/start_x_server.py DELETED
@@ -1,272 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Taken from https://github.com/allenai/ai2thor/blob/main/scripts/ai2thor-xorg
3
- # Starts an x-server to support running Blender on a headless machine with
4
- # dedicated NVIDIA GPUs
5
-
6
- import argparse
7
-
8
- import os
9
- import platform
10
- import re
11
- import shlex
12
- import signal
13
- import subprocess
14
- import sys
15
- import time
16
-
17
- # Turning off automatic black formatting for this script as it breaks quotes.
18
- # fmt: off
19
- from typing import List
20
-
21
- PID_FILE = "/var/run/ai2thor-xorg.pid"
22
- CONFIG_FILE = "/tmp/ai2thor-xorg.conf"
23
-
24
- DEFAULT_HEIGHT = 768
25
- DEFAULT_WIDTH = 1024
26
-
27
-
28
- def process_alive(pid):
29
- """
30
- Use kill(0) to determine if pid is alive
31
- :param pid: process id
32
- :rtype: bool
33
- """
34
- try:
35
- os.kill(pid, 0)
36
- except OSError:
37
- return False
38
-
39
- return True
40
-
41
-
42
- def find_devices(excluded_device_ids):
43
- devices = []
44
- id_counter = 0
45
- for r in pci_records():
46
- if r.get("Vendor", "") == "NVIDIA Corporation" and r["Class"] in [
47
- "VGA compatible controller",
48
- "3D controller",
49
- ]:
50
- bus_id = "PCI:" + ":".join(
51
- map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"]))
52
- )
53
-
54
- if id_counter not in excluded_device_ids:
55
- devices.append(bus_id)
56
-
57
- id_counter += 1
58
-
59
- if not devices:
60
- print("Error: ai2thor-xorg requires at least one NVIDIA device")
61
- sys.exit(1)
62
-
63
- return devices
64
-
65
- def active_display_bus_ids():
66
- # this determines whether a monitor is connected to the GPU
67
- # if one is, the following Option is added for the Screen "UseDisplayDevice" "None"
68
- command = "nvidia-smi --query-gpu=pci.bus_id,display_active --format=csv,noheader"
69
- active_bus_ids = set()
70
- result = subprocess.run(command, shell=True, stdout=subprocess.PIPE)
71
- if result.returncode == 0:
72
- for line in result.stdout.decode().strip().split("\n"):
73
- nvidia_bus_id, display_status = re.split(r",\s?", line.strip())
74
- bus_id = "PCI:" + ":".join(
75
- map(lambda x: str(int(x, 16)), re.split(r"[:\.]", nvidia_bus_id)[1:])
76
- )
77
- if display_status.lower() == "enabled":
78
- active_bus_ids.add(bus_id)
79
-
80
- return active_bus_ids
81
-
82
- def pci_records():
83
- records = []
84
- command = shlex.split("lspci -vmm")
85
- output = subprocess.check_output(command).decode()
86
-
87
- for devices in output.strip().split("\n\n"):
88
- record = {}
89
- records.append(record)
90
- for row in devices.split("\n"):
91
- key, value = row.split("\t")
92
- record[key.split(":")[0]] = value
93
-
94
- return records
95
-
96
-
97
- def read_pid():
98
- if os.path.isfile(PID_FILE):
99
- with open(PID_FILE) as f:
100
- return int(f.read())
101
- else:
102
- return None
103
-
104
-
105
- def start(display: str, excluded_device_ids: List[int], width: int, height: int):
106
- pid = read_pid()
107
-
108
- if pid and process_alive(pid):
109
- print("Error: ai2thor-xorg is already running with pid: %s" % pid)
110
- sys.exit(1)
111
-
112
- with open(CONFIG_FILE, "w") as f:
113
- f.write(generate_xorg_conf(excluded_device_ids, width=width, height=height))
114
-
115
- log_file = "/var/log/ai2thor-xorg.%s.log" % display
116
- error_log_file = "/var/log/ai2thor-xorg-error.%s.log" % display
117
- command = shlex.split(
118
- "Xorg -quiet -maxclients 1024 -noreset +extension GLX +extension RANDR +extension RENDER -logfile %s -config %s :%s"
119
- % (log_file, CONFIG_FILE, display)
120
- )
121
-
122
- pid = None
123
- with open(error_log_file, "w") as error_log_f:
124
- proc = subprocess.Popen(command, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=error_log_f)
125
- pid = proc.pid
126
- try:
127
- proc.wait(timeout=0.25)
128
- except subprocess.TimeoutExpired:
129
- pass
130
-
131
- if pid and process_alive(pid):
132
- with open(PID_FILE, "w") as f:
133
- f.write(str(proc.pid))
134
- else:
135
- print("Error: error with command '%s'" % " ".join(command))
136
- with open(error_log_file, "r") as f:
137
- print(f.read())
138
-
139
-
140
- def print_config(excluded_device_ids: List[int], width: int, height: int):
141
- print(generate_xorg_conf(excluded_device_ids, width=width, height=height))
142
-
143
-
144
- def stop():
145
- pid = read_pid()
146
- if pid and process_alive(pid):
147
- os.kill(pid, signal.SIGTERM)
148
-
149
- for i in range(10):
150
- time.sleep(0.2)
151
- if not process_alive(pid):
152
- os.unlink(PID_FILE)
153
- break
154
-
155
-
156
- def generate_xorg_conf(
157
- excluded_device_ids: List[int], width: int, height: int
158
- ):
159
- devices = find_devices(excluded_device_ids)
160
- active_display_devices = active_display_bus_ids()
161
-
162
- xorg_conf = []
163
-
164
- device_section = """
165
- Section "Device"
166
- Identifier "Device{device_id}"
167
- Driver "nvidia"
168
- VendorName "NVIDIA Corporation"
169
- BusID "{bus_id}"
170
- EndSection
171
- """
172
- server_layout_section = """
173
- Section "ServerLayout"
174
- Identifier "Layout0"
175
- {screen_records}
176
- EndSection
177
- """
178
- screen_section = """
179
- Section "Screen"
180
- Identifier "Screen{screen_id}"
181
- Device "Device{device_id}"
182
- DefaultDepth 24
183
- Option "AllowEmptyInitialConfiguration" "True"
184
- Option "Interactive" "False"
185
- {extra_options}
186
- SubSection "Display"
187
- Depth 24
188
- Virtual {width} {height}
189
- EndSubSection
190
- EndSection
191
- """
192
- screen_records = []
193
- for i, bus_id in enumerate(devices):
194
- extra_options = ""
195
- if bus_id in active_display_devices:
196
- # See https://github.com/allenai/ai2thor/pull/990
197
- # when a monitor is connected, this option must be used otherwise
198
- # Xorg will fail to start
199
- extra_options = 'Option "UseDisplayDevice" "None"'
200
- xorg_conf.append(device_section.format(device_id=i, bus_id=bus_id))
201
- xorg_conf.append(screen_section.format(device_id=i, screen_id=i, width=width, height=height, extra_options=extra_options))
202
- screen_records.append(
203
- 'Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=i)
204
- )
205
-
206
- xorg_conf.append(
207
- server_layout_section.format(screen_records="\n ".join(screen_records))
208
- )
209
-
210
- output = "\n".join(xorg_conf)
211
- return output
212
-
213
-
214
- # fmt: on
215
-
216
- if __name__ == "__main__":
217
- if os.geteuid() != 0:
218
- path = os.path.abspath(__file__)
219
- print("Executing ai2thor-xorg with sudo")
220
- args = ["--", path] + sys.argv[1:]
221
- os.execvp("sudo", args)
222
-
223
- if platform.system() != "Linux":
224
- print("Error: Can only run ai2thor-xorg on linux")
225
- sys.exit(1)
226
-
227
- parser = argparse.ArgumentParser()
228
- parser.add_argument(
229
- "--exclude-device",
230
- help="exclude a specific GPU device",
231
- action="append",
232
- type=int,
233
- default=[],
234
- )
235
- parser.add_argument(
236
- "--width",
237
- help="width of the screen to start (should be greater than the maximum"
238
- f" width of any ai2thor instance you will start) [default: {DEFAULT_WIDTH}]",
239
- type=int,
240
- default=DEFAULT_WIDTH,
241
- )
242
- parser.add_argument(
243
- "--height",
244
- help="height of the screen to start (should be greater than the maximum"
245
- f" height of any ai2thor instance you will start) [default: {DEFAULT_HEIGHT}]",
246
- type=int,
247
- default=DEFAULT_HEIGHT,
248
- )
249
- parser.add_argument(
250
- "command",
251
- help="command to be executed",
252
- choices=["start", "stop", "print-config"],
253
- )
254
- parser.add_argument(
255
- "display", help="display to be used", nargs="?", type=int, default=0
256
- )
257
- args = parser.parse_args()
258
- if args.command == "start":
259
- start(
260
- display=args.display,
261
- excluded_device_ids=args.exclude_device,
262
- height=args.height,
263
- width=args.width,
264
- )
265
- elif args.command == "stop":
266
- stop()
267
- elif args.command == "print-config":
268
- print_config(
269
- excluded_device_ids=args.exclude_device,
270
- width=args.width,
271
- height=args.height,
272
- )
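
This helper exists because EEVEE needs a live X display: handle_found_object in main.py prefixes its Blender command with "export DISPLAY=:0.{gpu_i}", which only works once an X server with one screen per GPU is running. A sketch of the typical sequence on a headless NVIDIA machine (illustrative; the script also re-execs itself under sudo if launched without root):

    import subprocess

    # Bring up one virtual screen per detected NVIDIA GPU on display :0.
    subprocess.run(["sudo", "python3", "start_x_server.py", "start", "0"], check=True)
    # ... run the rendering jobs with DISPLAY=:0.<gpu_index> set ...
    subprocess.run(["sudo", "python3", "start_x_server.py", "stop"], check=True)
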
setup.py DELETED
@@ -1,27 +0,0 @@
1
- from setuptools import find_packages, setup
2
-
3
- # Read the README content
4
- with open("README.md", "r", encoding="utf-8") as fh:
5
- long_description = fh.read()
6
-
7
- # Read requirements from requirements.txt
8
- with open("requirements.txt", "r", encoding="utf-8") as f:
9
- requirements = [line.strip() for line in f.readlines()]
10
-
11
- setup(
12
- name="objaverse_xl",
13
- version="0.1.1",
14
- author="Allen Institute for AI",
15
- author_email="mattd@allenai.org",
16
- description="Objaverse-XL is an open dataset of over 10 million 3D objects",
17
- long_description=long_description,
18
- long_description_content_type="text/markdown",
19
- url="https://huggingface.co/datasets/allenai/objaverse-xl",
20
- packages=find_packages(),
21
- classifiers=[
22
- "Programming Language :: Python :: 3",
23
- "License :: OSI Approved :: Apache Software License",
24
- ],
25
- install_requires=requirements,
26
- extras_require={"s3": ["s3fs==0.4.2"]},
27
- )
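
Since install_requires is read from requirements.txt at build time, a development checkout only needs an editable install: pip install -e . for the core package, or pip install -e ".[s3]" to additionally pull in the pinned s3fs dependency named in extras_require above.
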
objaverse_v1/object-metadata.parquet → sketchfab/sketchfab.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:08a95607fdcacbe22504001890a137d83cb004a37dcd9e58175a81bf8344a2a2
- size 79893453
+ oid sha256:6462ef2ecf39069e210f6df1416e8a28ca931039e3c7fe6aa3dca593115c64e3
+ size 79625309
scripts/rendering/io_scene_usdz.zip → smithsonian/smithsonian.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ec07ab6125fe0a021ed08c64169eceda126330401aba3d494d5203d26ac4b093
- size 34685
+ oid sha256:e8dc6d4eb8035f174dc518449f6eea3e5d5d55084b9891371db8ae0141259ae5
+ size 313114
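
These LFS-tracked parquet files appear to be the per-source annotation indexes, so they can be inspected directly once pulled (the paths below follow the new source-per-directory layout introduced by this refactor and assume git-lfs has materialized the real parquet contents rather than the pointer files shown above):

import pandas as pd

# Hypothetical local checkout paths.
sketchfab_df = pd.read_parquet("sketchfab/sketchfab.parquet")
smithsonian_df = pd.read_parquet("smithsonian/smithsonian.parquet")
print(len(sketchfab_df), len(smithsonian_df))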
tests/test_api.py DELETED
@@ -1,553 +0,0 @@
1
- import os
2
- import shutil
3
-
4
- import fsspec
5
- import pandas as pd
6
-
7
- from objaverse_xl.github import GitHubDownloader
8
- from objaverse_xl.objaverse_v1 import SketchfabDownloader
9
- from objaverse_xl.thingiverse import ThingiverseDownloader
10
- from objaverse_xl.smithsonian import SmithsonianDownloader
11
-
12
-
13
- def test_github_process_repo():
14
- github_downloader = GitHubDownloader()
15
- download_dir = "~/.objaverse-tests"
16
- base_download_dir = os.path.join(download_dir, "github")
17
- fs, path = fsspec.core.url_to_fs(base_download_dir)
18
- fs.makedirs(path, exist_ok=True)
19
-
20
- new_objects = []
21
- handle_new_object = (
22
- lambda local_path, file_identifier, sha256, metadata: new_objects.append(
23
- dict(
24
- local_path=local_path,
25
- file_identifier=file_identifier,
26
- sha256=sha256,
27
- metadata=metadata,
28
- )
29
- )
30
- )
31
-
32
- for save_repo_format in ["tar", "tar.gz", "zip", "files"]:
33
- shutil.rmtree(os.path.join(path, "repos"), ignore_errors=True)
34
- out = github_downloader._process_repo(
35
- repo_id="mattdeitke/objaverse-xl-test-files",
36
- fs=fs,
37
- base_dir=path,
38
- save_repo_format=save_repo_format, # type: ignore
39
- expected_objects=dict(),
40
- handle_found_object=None,
41
- handle_modified_object=None,
42
- handle_missing_object=None,
43
- handle_new_object=handle_new_object,
44
- commit_hash="6928b08a2501aa7a4a4aabac1f888b66e7782056",
45
- )
46
-
47
- # test that the sha256's are correct
48
- assert len(out) == 0
49
- sha256s = [x["sha256"] for x in new_objects]
50
- for sha256 in [
51
- "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
52
- "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
53
- "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
54
- ]:
55
- assert sha256 in sha256s, f"{sha256=} not in {sha256s=}"
56
- github_urls = [x["file_identifier"] for x in new_objects]
57
- for github_url in [
58
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx",
59
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb",
60
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj",
61
- ]:
62
- assert github_url in github_urls, f"{github_url=} not in {github_urls=}"
63
-
64
- # test that the files are correct
65
- if save_repo_format != "files":
66
- assert fs.exists(
67
- os.path.join(
68
- path,
69
- "repos",
70
- "mattdeitke",
71
- f"objaverse-xl-test-files.{save_repo_format}",
72
- )
73
- )
74
- else:
75
- assert fs.exists(
76
- os.path.join(
77
- base_download_dir, "repos", "mattdeitke", "objaverse-xl-test-files"
78
- )
79
- )
80
-
81
-
82
- def test_github_handle_new_object():
83
- github_downloader = GitHubDownloader()
84
- found_objects = []
85
- handle_found_object = (
86
- lambda local_path, file_identifier, sha256, metadata: found_objects.append(
87
- dict(
88
- local_path=local_path,
89
- file_identifier=file_identifier,
90
- sha256=sha256,
91
- metadata=metadata,
92
- )
93
- )
94
- )
95
-
96
- missing_objects = []
97
- handle_missing_object = (
98
- lambda file_identifier, sha256, metadata: missing_objects.append(
99
- dict(
100
- file_identifier=file_identifier,
101
- sha256=sha256,
102
- metadata=metadata,
103
- )
104
- )
105
- )
106
-
107
- new_objects = []
108
- handle_new_object = (
109
- lambda local_path, file_identifier, sha256, metadata: new_objects.append(
110
- dict(
111
- local_path=local_path,
112
- file_identifier=file_identifier,
113
- sha256=sha256,
114
- metadata=metadata,
115
- )
116
- )
117
- )
118
-
119
- modified_objects = []
120
- handle_modified_object = lambda local_path, file_identifier, new_sha256, old_sha256, metadata: modified_objects.append(
121
- dict(
122
- local_path=local_path,
123
- file_identifier=file_identifier,
124
- new_sha256=new_sha256,
125
- old_sha256=old_sha256,
126
- metadata=metadata,
127
- )
128
- )
129
-
130
- download_dir = "~/.objaverse-tests"
131
- base_download_dir = os.path.join(download_dir, "github")
132
- fs, path = fsspec.core.url_to_fs(base_download_dir)
133
- fs.makedirs(path, exist_ok=True)
134
-
135
- shutil.rmtree(os.path.join(path, "repos"), ignore_errors=True)
136
- out = github_downloader._process_repo(
137
- repo_id="mattdeitke/objaverse-xl-test-files",
138
- fs=fs,
139
- base_dir=path,
140
- save_repo_format=None,
141
- expected_objects=dict(),
142
- handle_found_object=handle_found_object,
143
- handle_modified_object=handle_modified_object,
144
- handle_missing_object=handle_missing_object,
145
- handle_new_object=handle_new_object,
146
- commit_hash="6928b08a2501aa7a4a4aabac1f888b66e7782056",
147
- )
148
-
149
- assert len(out) == 0
150
- assert len(new_objects) == 3
151
- assert len(found_objects) == 0
152
- assert len(modified_objects) == 0
153
- assert len(missing_objects) == 0
154
-
155
-
156
- def test_github_handle_found_object():
157
- github_downloader = GitHubDownloader()
158
- found_objects = []
159
- handle_found_object = (
160
- lambda local_path, file_identifier, sha256, metadata: found_objects.append(
161
- dict(
162
- local_path=local_path,
163
- file_identifier=file_identifier,
164
- sha256=sha256,
165
- metadata=metadata,
166
- )
167
- )
168
- )
169
-
170
- missing_objects = []
171
- handle_missing_object = (
172
- lambda file_identifier, sha256, metadata: missing_objects.append(
173
- dict(
174
- file_identifier=file_identifier,
175
- sha256=sha256,
176
- metadata=metadata,
177
- )
178
- )
179
- )
180
-
181
- new_objects = []
182
- handle_new_object = (
183
- lambda local_path, file_identifier, sha256, metadata: new_objects.append(
184
- dict(
185
- local_path=local_path,
186
- file_identifier=file_identifier,
187
- sha256=sha256,
188
- metadata=metadata,
189
- )
190
- )
191
- )
192
-
193
- modified_objects = []
194
- handle_modified_object = lambda local_path, file_identifier, new_sha256, old_sha256, metadata: modified_objects.append(
195
- dict(
196
- local_path=local_path,
197
- file_identifier=file_identifier,
198
- new_sha256=new_sha256,
199
- old_sha256=old_sha256,
200
- metadata=metadata,
201
- )
202
- )
203
-
204
- download_dir = "~/.objaverse-tests"
205
- base_download_dir = os.path.join(download_dir, "github")
206
- fs, path = fsspec.core.url_to_fs(base_download_dir)
207
- fs.makedirs(path, exist_ok=True)
208
-
209
- shutil.rmtree(os.path.join(path, "repos"), ignore_errors=True)
210
- out = github_downloader._process_repo(
211
- repo_id="mattdeitke/objaverse-xl-test-files",
212
- fs=fs,
213
- base_dir=path,
214
- save_repo_format=None,
215
- expected_objects={
216
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
217
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
218
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
219
- },
220
- handle_found_object=handle_found_object,
221
- handle_modified_object=handle_modified_object,
222
- handle_missing_object=handle_missing_object,
223
- handle_new_object=handle_new_object,
224
- commit_hash="6928b08a2501aa7a4a4aabac1f888b66e7782056",
225
- )
226
-
227
- assert len(out) == 0
228
- assert len(found_objects) == 3
229
- assert len(missing_objects) == 0
230
- assert len(new_objects) == 0
231
- assert len(modified_objects) == 0
232
-
233
-
234
- def test_github_handle_modified_object():
235
- github_downloader = GitHubDownloader()
236
- found_objects = []
237
- handle_found_object = (
238
- lambda local_path, file_identifier, sha256, metadata: found_objects.append(
239
- dict(
240
- local_path=local_path,
241
- file_identifier=file_identifier,
242
- sha256=sha256,
243
- metadata=metadata,
244
- )
245
- )
246
- )
247
-
248
- missing_objects = []
249
- handle_missing_object = (
250
- lambda file_identifier, sha256, metadata: missing_objects.append(
251
- dict(
252
- file_identifier=file_identifier,
253
- sha256=sha256,
254
- metadata=metadata,
255
- )
256
- )
257
- )
258
-
259
- new_objects = []
260
- handle_new_object = (
261
- lambda local_path, file_identifier, sha256, metadata: new_objects.append(
262
- dict(
263
- local_path=local_path,
264
- file_identifier=file_identifier,
265
- sha256=sha256,
266
- metadata=metadata,
267
- )
268
- )
269
- )
270
-
271
- modified_objects = []
272
- handle_modified_object = lambda local_path, file_identifier, new_sha256, old_sha256, metadata: modified_objects.append(
273
- dict(
274
- local_path=local_path,
275
- file_identifier=file_identifier,
276
- new_sha256=new_sha256,
277
- old_sha256=old_sha256,
278
- metadata=metadata,
279
- )
280
- )
281
-
282
- download_dir = "~/.objaverse-tests"
283
- base_download_dir = os.path.join(download_dir, "github")
284
- fs, path = fsspec.core.url_to_fs(base_download_dir)
285
- fs.makedirs(path, exist_ok=True)
286
-
287
- shutil.rmtree(os.path.join(path, "repos"), ignore_errors=True)
288
- out = github_downloader._process_repo(
289
- repo_id="mattdeitke/objaverse-xl-test-files",
290
- fs=fs,
291
- base_dir=path,
292
- save_repo_format=None,
293
- expected_objects={
294
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9<modified>",
295
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
296
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
297
- },
298
- handle_found_object=handle_found_object,
299
- handle_modified_object=handle_modified_object,
300
- handle_missing_object=handle_missing_object,
301
- handle_new_object=handle_new_object,
302
- commit_hash="6928b08a2501aa7a4a4aabac1f888b66e7782056",
303
- )
304
-
305
- assert len(out) == 0
306
- assert len(found_objects) == 2
307
- assert len(missing_objects) == 0
308
- assert len(new_objects) == 0
309
- assert len(modified_objects) == 1
310
-
311
-
312
- def test_github_handle_missing_object():
313
- github_downloader = GitHubDownloader()
314
- found_objects = []
315
- handle_found_object = (
316
- lambda local_path, file_identifier, sha256, metadata: found_objects.append(
317
- dict(
318
- local_path=local_path,
319
- file_identifier=file_identifier,
320
- sha256=sha256,
321
- metadata=metadata,
322
- )
323
- )
324
- )
325
-
326
- missing_objects = []
327
- handle_missing_object = (
328
- lambda file_identifier, sha256, metadata: missing_objects.append(
329
- dict(
330
- file_identifier=file_identifier,
331
- sha256=sha256,
332
- metadata=metadata,
333
- )
334
- )
335
- )
336
-
337
- new_objects = []
338
- handle_new_object = (
339
- lambda local_path, file_identifier, sha256, metadata: new_objects.append(
340
- dict(
341
- local_path=local_path,
342
- file_identifier=file_identifier,
343
- sha256=sha256,
344
- metadata=metadata,
345
- )
346
- )
347
- )
348
-
349
- modified_objects = []
350
- handle_modified_object = lambda local_path, file_identifier, new_sha256, old_sha256, metadata: modified_objects.append(
351
- dict(
352
- local_path=local_path,
353
- file_identifier=file_identifier,
354
- new_sha256=new_sha256,
355
- old_sha256=old_sha256,
356
- metadata=metadata,
357
- )
358
- )
359
-
360
- download_dir = "~/.objaverse-tests"
361
- base_download_dir = os.path.join(download_dir, "github")
362
- fs, path = fsspec.core.url_to_fs(base_download_dir)
363
- fs.makedirs(path, exist_ok=True)
364
-
365
- shutil.rmtree(os.path.join(path, "repos"), ignore_errors=True)
366
- out = github_downloader._process_repo(
367
- repo_id="mattdeitke/objaverse-xl-test-files",
368
- fs=fs,
369
- base_dir=path,
370
- save_repo_format=None,
371
- expected_objects={
372
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
373
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example-2.fbx": "<fake-missing-object>",
374
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
375
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
376
- },
377
- handle_found_object=handle_found_object,
378
- handle_modified_object=handle_modified_object,
379
- handle_missing_object=handle_missing_object,
380
- handle_new_object=handle_new_object,
381
- commit_hash="6928b08a2501aa7a4a4aabac1f888b66e7782056",
382
- )
383
-
384
- assert len(out) == 0
385
- assert len(found_objects) == 3
386
- assert len(missing_objects) == 1
387
- assert len(new_objects) == 0
388
- assert len(modified_objects) == 0
389
-
390
-
391
- def test_github_handle_missing_object_2():
392
- github_downloader = GitHubDownloader()
393
- found_objects = []
394
- handle_found_object = (
395
- lambda local_path, file_identifier, sha256, metadata: found_objects.append(
396
- dict(
397
- local_path=local_path,
398
- file_identifier=file_identifier,
399
- sha256=sha256,
400
- metadata=metadata,
401
- )
402
- )
403
- )
404
-
405
- missing_objects = []
406
- handle_missing_object = (
407
- lambda file_identifier, sha256, metadata: missing_objects.append(
408
- dict(
409
- file_identifier=file_identifier,
410
- sha256=sha256,
411
- metadata=metadata,
412
- )
413
- )
414
- )
415
-
416
- new_objects = []
417
- handle_new_object = (
418
- lambda local_path, file_identifier, sha256, metadata: new_objects.append(
419
- dict(
420
- local_path=local_path,
421
- file_identifier=file_identifier,
422
- sha256=sha256,
423
- metadata=metadata,
424
- )
425
- )
426
- )
427
-
428
- modified_objects = []
429
- handle_modified_object = lambda local_path, file_identifier, new_sha256, old_sha256, metadata: modified_objects.append(
430
- dict(
431
- local_path=local_path,
432
- file_identifier=file_identifier,
433
- new_sha256=new_sha256,
434
- old_sha256=old_sha256,
435
- metadata=metadata,
436
- )
437
- )
438
-
439
- download_dir = "~/.objaverse-tests"
440
- base_download_dir = os.path.join(download_dir, "github")
441
- fs, path = fsspec.core.url_to_fs(base_download_dir)
442
- fs.makedirs(path, exist_ok=True)
443
-
444
- shutil.rmtree(os.path.join(path, "repos"), ignore_errors=True)
445
- out = github_downloader._process_repo(
446
- repo_id="mattdeitke/objaverse-xl-test-files-does-not-exist",
447
- fs=fs,
448
- base_dir=path,
449
- save_repo_format=None,
450
- expected_objects={
451
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
452
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example-2.fbx": "<fake-missing-object>",
453
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
454
- "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
455
- },
456
- handle_found_object=handle_found_object,
457
- handle_modified_object=handle_modified_object,
458
- handle_missing_object=handle_missing_object,
459
- handle_new_object=handle_new_object,
460
- commit_hash="6928b08a2501aa7a4a4aabac1f888b66e7782056",
461
- )
462
-
463
- assert len(out) == 0
464
- assert len(found_objects) == 0
465
- assert len(missing_objects) == 4
466
- assert len(new_objects) == 0
467
- assert len(modified_objects) == 0
468
-
469
-
470
- def test_github_download_cache():
471
- github_downloader = GitHubDownloader()
472
- objects = pd.DataFrame(
473
- [
474
- {
475
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.fbx",
476
- "license": None,
477
- "sha256": "7037575f47816118e5a34e7c0da9927e1be7be3f5b4adfac337710822eb50fa9",
478
- },
479
- {
480
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.glb",
481
- "license": None,
482
- "sha256": "04e6377317d6818e32c5cbd1951e76deb3641bbf4f6db6933046221d5fbf1c5c",
483
- },
484
- {
485
- "fileIdentifier": "https://github.com/mattdeitke/objaverse-xl-test-files/blob/6928b08a2501aa7a4a4aabac1f888b66e7782056/example.obj",
486
- "license": None,
487
- "sha256": "d2b9a5d7c47dc93526082c9b630157ab6bce4fd8669610d942176f4a36444e71",
488
- },
489
- ]
490
- )
491
-
492
- # remove the repos directory
493
- for save_repo_format in ["tar", "tar.gz", "zip", "files"]:
494
- repos_dir = "~/.objaverse-tests/github/repos"
495
- shutil.rmtree(os.path.expanduser(repos_dir), ignore_errors=True)
496
-
497
- out = github_downloader.download_objects(
498
- objects=objects,
499
- processes=1,
500
- download_dir="~/.objaverse-tests",
501
- save_repo_format=save_repo_format, # type: ignore
502
- )
503
- assert len(out) == 3
504
-
505
- out = github_downloader.download_objects(
506
- objects=objects,
507
- processes=1,
508
- download_dir="~/.objaverse-tests",
509
- save_repo_format=save_repo_format, # type: ignore
510
- )
511
- assert len(out) == 0
512
-
513
-
514
- def test_annotations():
515
- downloaders = [
516
- GitHubDownloader(),
517
- SketchfabDownloader(),
518
- SmithsonianDownloader(),
519
- ThingiverseDownloader(),
520
- ]
521
-
522
- for downloader in downloaders:
523
- annotations_df = downloader.get_annotations()
524
-
525
- # make sure the annotation columns are exactly the expected set
526
- assert set(annotations_df.columns) == set(
527
- ["fileIdentifier", "source", "license", "fileType", "sha256", "metadata"]
528
- )
529
-
530
-
531
- def test_download_objects():
532
- downloaders = [
533
- GitHubDownloader(),
534
- SketchfabDownloader(),
535
- SmithsonianDownloader(),
536
- ThingiverseDownloader(),
537
- ]
538
-
539
- download_dir = "~/.objaverse-tests"
540
-
541
- for downloader in downloaders:
542
- shutil.rmtree(os.path.expanduser(download_dir), ignore_errors=True)
543
-
544
- annotations_df = downloader.get_annotations()
545
-
546
- test_objects = annotations_df.head(n=2)
547
-
548
- out = downloader.download_objects(
549
- objects=test_objects,
550
- download_dir=download_dir,
551
- processes=2,
552
- )
553
- assert isinstance(out, dict), f"{out=}"
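
The deleted tests above double as usage documentation for the refactored downloader classes. A minimal sketch of the common workflow they exercise (the download directory is illustrative; the tests only assert that download_objects returns a dict):

import pandas as pd

from objaverse_xl.github import GitHubDownloader

downloader = GitHubDownloader()

# Per test_annotations, every downloader returns these columns:
# fileIdentifier, source, license, fileType, sha256, metadata
annotations: pd.DataFrame = downloader.get_annotations()

# Download a small sample of objects with 2 worker processes.
out = downloader.download_objects(
    objects=annotations.head(n=2),
    download_dir="~/.objaverse",  # hypothetical cache directory
    processes=2,
)
assert isinstance(out, dict)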
thingiverse/thingiverse-objects.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:57f0a79e23af18fed59c08efeabbaa8886bfc5240e94119687451190c09d76ab
3
- size 292395520
smithsonian/object-metadata.parquet → thingiverse/thingiverse.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6233d2bb2096b5ba7792c206b3772831849991a2861b2539e6d1c4dd62a30922
- size 312580
+ oid sha256:b4603c1e039b1e963f4848af145abae6e15d6f201c8d075b0c29d6939cc3eeee
+ size 340534075