mattdeitke committed
Commit 3dc86cc
1 Parent(s): aae8281

convert objaverse 1.0 to use the abstract class

Files changed (1):
  1. objaverse_xl/objaverse_v1.py +431 -285
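
The diff below moves every module-level function in objaverse_v1.py into a new Sketchfab class that derives from ObjaverseSource, imported from objaverse_xl.abstract. That abstract class is not part of this commit, so the sketch below is only an assumption about its shape, inferred from the two public methods the diff overrides (load_annotations and download_objects); the real base class may declare more:

from abc import ABC, abstractmethod
from typing import Callable, Dict, Optional

import pandas as pd


class ObjaverseSource(ABC):
    """Assumed interface: one source of 3D objects (Sketchfab, GitHub, Thingiverse, ...)."""

    @abstractmethod
    def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
        """Return a DataFrame of object annotations for this source."""

    @abstractmethod
    def download_objects(
        self,
        objects: pd.DataFrame,
        download_dir: Optional[str] = "~/.objaverse",
        processes: Optional[int] = None,
        handle_found_object: Optional[Callable] = None,
        handle_modified_object: Optional[Callable] = None,
        handle_missing_object: Optional[Callable] = None,
        **kwargs,
    ) -> Dict[str, str]:
        """Download the given objects, verify their hashes, and fire the callbacks."""

Keeping one signature across sources means callers can swap sources without changing their download code, which appears to be the point of this conversion.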
objaverse_xl/objaverse_v1.py CHANGED
@@ -10,100 +10,151 @@ import requests
  import pandas as pd
  import tempfile
  from objaverse_xl.utils import get_file_hash

  import fsspec
  from loguru import logger
  from tqdm import tqdm


- def load_annotations(download_dir: str = "~/.objaverse") -> pd.DataFrame:
-     """Load the annotations from the given directory.
-
-     Args:
-         download_dir (str, optional): The directory to load the annotations from.
-             Supports all file systems supported by fsspec. Defaults to
-             "~/.objaverse".
-
-     Returns:
-         pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
-             "filename", and "license".
-     """
-     remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/objaverse_v1/object-metadata.parquet"
-     download_path = os.path.join(
-         download_dir, "hf-objaverse-v1", "thingiverse-objects.parquet"
-     )
-     fs, path = fsspec.core.url_to_fs(download_path)
-
-     if not fs.exists(path):
-         fs.makedirs(os.path.dirname(path), exist_ok=True)
-         logger.info(f"Downloading {remote_url} to {download_path}")
-         response = requests.get(remote_url)
-         response.raise_for_status()
-         with fs.open(path, "wb") as file:
-             file.write(response.content)
-
-     # read the file with pandas and fsspec
-     with fs.open(download_path, "rb") as f:
-         annotations_df = pd.read_parquet(f)
-
-     annotations_df["metadata"] = "{}"
-
-     return annotations_df
-
-
- def load_full_annotations(
-     uids: Optional[List[str]] = None,
-     download_dir: str = "~/.objaverse",
- ) -> Dict[str, Any]:
-     """Load the full metadata of all objects in the dataset.
-
-     Args:
-         uids: A list of uids with which to load metadata. If None, it loads
-             the metadata for all uids.
-         download_dir: The base directory to download the annotations to. Supports all
-             file systems supported by fsspec. Defaults to "~/.objaverse".
-
-     Returns:
-         A dictionary of the metadata for each object. The keys are the uids and the
-         values are the metadata for that object.
-     """
-     # make the metadata dir if it doesn't exist
-     metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
-     fs, _ = fsspec.core.url_to_fs(metadata_path)
-     fs.makedirs(metadata_path, exist_ok=True)
-
-     # get the dir ids that need to be loaded if only downloading a subset of uids
-     object_paths = _load_object_paths(download_dir=download_dir)
-     dir_ids = (
-         {object_paths[uid].split("/")[1] for uid in uids}
-         if uids is not None
-         else {f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)}
-     )
-
-     # get the existing metadata files
-     existing_metadata_files = fs.glob(
-         os.path.join(metadata_path, "*.json.gz"), refresh=True
-     )
-     existing_dir_ids = {
-         file.split("/")[-1].split(".")[0]
-         for file in existing_metadata_files
-         if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
-     }
-     downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
-     logger.info(f"Found {len(downloaded_dir_ids)} metadata files already downloaded")
-
-     # download the metadata from the missing dir_ids
-     dir_ids_to_download = dir_ids - existing_dir_ids
-     logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")
-
-     # download the metadata file if it doesn't exist
-     if len(dir_ids_to_download) > 0:
-         for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
-             # get the path to the json file
-             path = os.path.join(metadata_path, f"{i_id}.json.gz")
-
-             # get the url to the remote json file
-             hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"

              # download the file to a tmp path to avoid partial downloads on interruption
              tmp_path = f"{path}.tmp"
@@ -112,235 +163,330 @@ def load_full_annotations(
                      f.write(response.read())
              fs.rename(tmp_path, path)

-     out = {}
-     for i_id in tqdm(dir_ids, desc="Reading metadata files"):
-         # get the path to the json file
-         path = os.path.join(metadata_path, f"{i_id}.json.gz")
-
-         # read the json file of the metadata chunk
          with fs.open(path, "rb") as f:
              with gzip.GzipFile(fileobj=f) as gfile:
                  content = gfile.read()
-         data = json.loads(content)
-
-         # filter the data to only include the uids we want
-         if uids is not None:
-             data = {uid: data[uid] for uid in uids if uid in data}
-
-         # add the data to the out dict
-         out.update(data)
-
-     return out
-
-
- annotations = load_annotations(download_dir="~/.objaverse-temp-400")
-
-
- def _load_object_paths(download_dir: str) -> Dict[str, str]:
-     """Load the object paths from the dataset.
-
-     The object paths specify the location of where the object is located
-     in the Hugging Face repo.
-
-     Returns:
-         A dictionary mapping the uid to the object path.
-     """
-     object_paths_file = "object-paths.json.gz"
-     local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)
-
-     # download the object_paths file if it doesn't exist
-     fs, path = fsspec.core.url_to_fs(local_path)
-     if not fs.exists(path):
-         hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
-         fs.makedirs(os.path.dirname(path), exist_ok=True)
-
-         # download the file to a tmp path to avoid partial downloads on interruption
-         tmp_path = f"{path}.tmp"
-         with fs.open(tmp_path, "wb") as f:
-             with urllib.request.urlopen(hf_url) as response:
-                 f.write(response.read())
-         fs.rename(tmp_path, path)
-
-     # read the object_paths
-     with fs.open(path, "rb") as f:
-         with gzip.GzipFile(fileobj=f) as gfile:
-             content = gfile.read()
-     object_paths = json.loads(content)
-
-     return object_paths
-
-
- def load_uids(download_dir: str = "~/.objaverse") -> List[str]:
-     """Load the uids from the dataset.
-
-     Returns:
-         A list of all the UIDs from the dataset.
-     """
-     return list(_load_object_paths(download_dir=download_dir).keys())
-
-
- def _download_object(
-     uid: str,
-     hf_object_path: str,
-     download_dir: str,
- ) -> Tuple[str, str]:
-     """Download the object for the given uid.
-
-     Args:
-         uid: The uid of the object to load.
-         hf_object_path: The path to the object in the Hugging Face repo. Here, hf_object_path
-             is the part that comes after "main" in the Hugging Face repo url:
-             https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
-         download_dir: The base directory to download the object to. Supports all
-             file systems supported by fsspec. Defaults to "~/.objaverse".
-
-     Returns:
-         A tuple of the uid and the path to where the downloaded object.
-     """
-     hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"
-
-     filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
-     fs, path = fsspec.core.url_to_fs(filename)
-
-     # download the file
-     fs.makedirs(os.path.dirname(path), exist_ok=True)
-     tmp_path = f"{path}.tmp"
-     with fs.open(tmp_path, "wb") as file:
-         with urllib.request.urlopen(hf_url) as response:
-             file.write(response.read())
-
-     fs.rename(tmp_path, path)
-
-     return uid, filename
-
-
- def _parallel_download_object(args):
-     # workaround since starmap doesn't work well with tqdm
-     return _download_object(*args)
-
-
- def load_objects(
-     uids: List[str],
-     download_processes: int = 1,
-     download_dir: str = "~/.objaverse",
- ) -> Dict[str, str]:
-     """Return the path to the object files for the given uids.
-
-     If the object is not already downloaded, it will be downloaded.
-
-     Args:
-         uids: A list of uids.
-         download_processes: The number of processes to use to download the objects.
-
-     Returns:
-         A dictionary mapping the object uid to the local path of where the object
-         downloaded.
-     """
-     uids_set = set(uids)
-     hf_object_paths = _load_object_paths(download_dir=download_dir)
-
-     versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
-     fs, path = fsspec.core.url_to_fs(versioned_dirname)
-
-     # Get the existing file paths. This is much faster than calling fs.exists() for each
-     # file. `glob()` is like walk, but returns a list of files instead of the nested
-     # directory structure. glob() is also faster than find() / walk() since it doesn't
-     # need to traverse the entire directory structure.
-     existing_file_paths = fs.glob(
-         os.path.join(path, "glbs", "*", "*.glb"), refresh=True
-     )
-     existing_uids = {
-         file.split("/")[-1].split(".")[0]
-         for file in existing_file_paths
-         if file.endswith(".glb")  # note partial files end with .glb.tmp
-     }
-
-     # add the existing downloaded uids to the return dict
-     out = {}
-     already_downloaded_uids = uids_set.intersection(existing_uids)
-     for uid in already_downloaded_uids:
-         hf_object_path = hf_object_paths[uid]
-         fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
-         out[uid] = fs_abs_object_path
-
-     logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")
-
-     # get the uids that need to be downloaded
-     remaining_uids = uids_set - existing_uids
-     uids_to_download = []
-     for uid in remaining_uids:
-         if uid not in hf_object_paths:
-             logger.error(f"Could not find object with uid {uid}. Skipping it.")
-             continue
-         uids_to_download.append((uid, hf_object_paths[uid]))
-
-     logger.info(f"Downloading {len(uids_to_download)} new objects")
-
-     # check if all objects are already downloaded
-     if len(uids_to_download) == 0:
-         return out

-     if download_processes == 1:
-         # iteratively download the objects
-         for uid, hf_object_path in tqdm(uids_to_download):
-             uid, local_path = _download_object(
-                 uid=uid, hf_object_path=hf_object_path, download_dir=download_dir
-             )
-             out[uid] = local_path
-     else:
          args = [
-             (uid, hf_object_path, download_dir)
-             for uid, hf_object_path in uids_to_download
          ]

          # download the objects in parallel
-         with Pool(download_processes) as pool:
              new_object_downloads = list(
                  tqdm(
-                     pool.imap_unordered(_parallel_download_object, args),
                      total=len(args),
                  )
              )

-         for uid, local_path in new_object_downloads:
-             out[uid] = local_path
-
-     return out
-
-
- def load_lvis_annotations(download_dir: str = "~/.objaverse") -> Dict[str, List[str]]:
-     """Load the LVIS annotations.
-
-     If the annotations are not already downloaded, they will be downloaded.
-
-     Args:
-         download_dir: The base directory to download the annotations to. Supports all
-             file systems supported by fsspec. Defaults to "~/.objaverse".
-
-     Returns:
-         A dictionary mapping the LVIS category to the list of uids in that category.
-     """
-     hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"
-
-     download_path = os.path.join(
-         download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
-     )
-
-     # use fsspec
-     fs, path = fsspec.core.url_to_fs(download_path)
-     if not fs.exists(path):
-         # make dir if it doesn't exist
-         fs.makedirs(os.path.dirname(path), exist_ok=True)
-
-         # download the file
-         with fs.open(path, "wb") as f:
-             with urllib.request.urlopen(hf_url) as response:
-                 f.write(response.read())
-
-     # load the gzip file
-     with fs.open(path, "rb") as f:
-         with gzip.GzipFile(fileobj=f) as gfile:
-             content = gfile.read()
-     data = json.loads(content)
-
-     return data

  import pandas as pd
  import tempfile
  from objaverse_xl.utils import get_file_hash
+ from objaverse_xl.abstract import ObjaverseSource

  import fsspec
  from loguru import logger
  from tqdm import tqdm


+ class Sketchfab(ObjaverseSource):
+     """A class for downloading and processing Objaverse 1.0."""
+
+     def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
+         """Load the annotations from the given directory.
+
+         Args:
+             download_dir (str, optional): The directory to load the annotations from.
+                 Supports all file systems supported by fsspec. Defaults to
+                 "~/.objaverse".
+
+         Returns:
+             pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
+                 "filename", and "license".
+         """
+         remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/objaverse_v1/object-metadata.parquet"
+         download_path = os.path.join(
+             download_dir, "hf-objaverse-v1", "thingiverse-objects.parquet"
+         )
+         fs, path = fsspec.core.url_to_fs(download_path)
+
+         if not fs.exists(path):
+             fs.makedirs(os.path.dirname(path), exist_ok=True)
+             logger.info(f"Downloading {remote_url} to {download_path}")
+             response = requests.get(remote_url)
+             response.raise_for_status()
+             with fs.open(path, "wb") as file:
+                 file.write(response.content)
+
+         # read the file with pandas and fsspec
+         with fs.open(download_path, "rb") as f:
+             annotations_df = pd.read_parquet(f)
+
+         annotations_df["metadata"] = "{}"
+
+         return annotations_df
+
+     def load_full_annotations(
+         self,
+         uids: Optional[List[str]] = None,
+         download_dir: str = "~/.objaverse",
+     ) -> Dict[str, Any]:
+         """Load the full metadata of all objects in the dataset.
+
+         Args:
+             uids: A list of uids with which to load metadata. If None, it loads
+                 the metadata for all uids.
+             download_dir: The base directory to download the annotations to. Supports all
+                 file systems supported by fsspec. Defaults to "~/.objaverse".
+
+         Returns:
+             A dictionary of the metadata for each object. The keys are the uids and the
+             values are the metadata for that object.
+         """
+         # make the metadata dir if it doesn't exist
+         metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
+         fs, _ = fsspec.core.url_to_fs(metadata_path)
+         fs.makedirs(metadata_path, exist_ok=True)
+
+         # get the dir ids that need to be loaded if only downloading a subset of uids
+         object_paths = self._load_object_paths(download_dir=download_dir)
+         dir_ids = (
+             {object_paths[uid].split("/")[1] for uid in uids}
+             if uids is not None
+             else {f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)}
+         )
+
+         # get the existing metadata files
+         existing_metadata_files = fs.glob(
+             os.path.join(metadata_path, "*.json.gz"), refresh=True
+         )
+         existing_dir_ids = {
+             file.split("/")[-1].split(".")[0]
+             for file in existing_metadata_files
+             if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
+         }
+         downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
+         logger.info(
+             f"Found {len(downloaded_dir_ids)} metadata files already downloaded"
+         )
+
+         # download the metadata from the missing dir_ids
+         dir_ids_to_download = dir_ids - existing_dir_ids
+         logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")
+
+         # download the metadata file if it doesn't exist
+         if len(dir_ids_to_download) > 0:
+             for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
+                 # get the path to the json file
+                 path = os.path.join(metadata_path, f"{i_id}.json.gz")
+
+                 # get the url to the remote json file
+                 hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"
+
+                 # download the file to a tmp path to avoid partial downloads on interruption
+                 tmp_path = f"{path}.tmp"
+                 with fs.open(tmp_path, "wb") as f:
+                     with urllib.request.urlopen(hf_url) as response:
+                         f.write(response.read())
+                 fs.rename(tmp_path, path)
+
+         out = {}
+         for i_id in tqdm(dir_ids, desc="Reading metadata files"):
+             # get the path to the json file
+             path = os.path.join(metadata_path, f"{i_id}.json.gz")

+             # read the json file of the metadata chunk
+             with fs.open(path, "rb") as f:
+                 with gzip.GzipFile(fileobj=f) as gfile:
+                     content = gfile.read()
+             data = json.loads(content)

+             # filter the data to only include the uids we want
+             if uids is not None:
+                 data = {uid: data[uid] for uid in uids if uid in data}

+             # add the data to the out dict
+             out.update(data)

+         return out

+     def _load_object_paths(self, download_dir: str) -> Dict[str, str]:
+         """Load the object paths from the dataset.
+
+         The object paths specify where each object is located in the Hugging Face
+         repo.
+
+         Returns:
+             A dictionary mapping the uid to the object path.
+         """
+         object_paths_file = "object-paths.json.gz"
+         local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)
+
+         # download the object_paths file if it doesn't exist
+         fs, path = fsspec.core.url_to_fs(local_path)
+         if not fs.exists(path):
+             hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
+             fs.makedirs(os.path.dirname(path), exist_ok=True)

              # download the file to a tmp path to avoid partial downloads on interruption
              tmp_path = f"{path}.tmp"
              with fs.open(tmp_path, "wb") as f:
                  with urllib.request.urlopen(hf_url) as response:
                      f.write(response.read())
              fs.rename(tmp_path, path)

+         # read the object_paths
          with fs.open(path, "rb") as f:
              with gzip.GzipFile(fileobj=f) as gfile:
                  content = gfile.read()
+         object_paths = json.loads(content)
+
+         return object_paths
+
+     def load_uids(self, download_dir: str = "~/.objaverse") -> List[str]:
+         """Load the uids from the dataset.
+
+         Returns:
+             A list of all the UIDs from the dataset.
+         """
+         return list(self._load_object_paths(download_dir=download_dir).keys())
+
+     def _download_object(
+         self,
+         file_identifier: str,
+         hf_object_path: str,
+         download_dir: Optional[str],
+         expected_sha256: str,
+         handle_found_object: Optional[Callable] = None,
+         handle_modified_object: Optional[Callable] = None,
+         handle_missing_object: Optional[Callable] = None,
+     ) -> Tuple[str, Optional[str]]:
+         """Download the object for the given uid.
+
+         Args:
+             file_identifier: The file identifier of the object.
+             hf_object_path: The path to the object in the Hugging Face repo. Here,
+                 hf_object_path is the part that comes after "main" in the Hugging Face
+                 repo url:
+                 https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
+             download_dir: The base directory to download the object to. Supports all
+                 file systems supported by fsspec. Defaults to "~/.objaverse".
+             expected_sha256 (str): The expected SHA256 of the contents of the
+                 downloaded object.
+             handle_found_object (Optional[Callable], optional): Called when an object is
+                 successfully found and downloaded. Here, the object has the same sha256
+                 as the one that was downloaded with Objaverse-XL. If None, the object
+                 will be downloaded, but nothing will be done with it. Args for the
+                 function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): GitHub URL of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the 3D object.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                   GitHub organization and repo names.
+                 Return is not used. Defaults to None.
+             handle_modified_object (Optional[Callable], optional): Called when a
+                 modified object is found and downloaded. Here, the object is
+                 successfully downloaded, but it has a different sha256 than the one that
+                 was downloaded with Objaverse-XL. This is not expected to happen very
+                 often, because the same commit hash is used for each repo. If None, the
+                 object will be downloaded, but nothing will be done with it. Args for
+                 the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): GitHub URL of the 3D object.
+                 - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
+                   object.
+                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
+                   it was when it was downloaded with Objaverse-XL.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                   GitHub organization and repo names.
+                 Return is not used. Defaults to None.
+             handle_missing_object (Optional[Callable], optional): Called when an object
+                 that is in Objaverse-XL is not found. Here, it is likely that the
+                 repository was deleted or renamed. If None, nothing will be done with
+                 the missing object. Args for the function include:
+                 - file_identifier (str): GitHub URL of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the original 3D object.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                   GitHub organization and repo names.
+                 Return is not used. Defaults to None.
+
+         Returns:
+             A tuple of the file identifier and the path where the object was
+             downloaded. If download_dir is None, the path will be None.
+         """
+         hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"
+
+         with tempfile.TemporaryDirectory() as temp_dir:
+             # download the file locally
+             temp_path = os.path.join(temp_dir, hf_object_path)
+             os.makedirs(os.path.dirname(temp_path), exist_ok=True)
+             temp_path_tmp = f"{temp_path}.tmp"
+             with open(temp_path_tmp, "wb") as file:
+                 with urllib.request.urlopen(hf_url) as response:
+                     file.write(response.read())
+             os.rename(temp_path_tmp, temp_path)
+
+             # get the sha256 of the downloaded file
+             sha256 = get_file_hash(temp_path)
+
+             if sha256 == expected_sha256:
+                 if handle_found_object is not None:
+                     handle_found_object(
+                         local_path=temp_path,
+                         file_identifier=file_identifier,
+                         sha256=sha256,
+                         metadata=dict(),
+                     )
+             else:
+                 if handle_modified_object is not None:
+                     handle_modified_object(
+                         local_path=temp_path,
+                         file_identifier=file_identifier,
+                         new_sha256=sha256,
+                         old_sha256=expected_sha256,
+                         metadata=dict(),
+                     )
+
+             if download_dir is not None:
+                 filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
+                 fs, path = fsspec.core.url_to_fs(filename)
+                 fs.makedirs(os.path.dirname(path), exist_ok=True)
+                 fs.put(temp_path, path)
+             else:
+                 path = None
+
+         return file_identifier, path
+
+     def _parallel_download_object(self, args):
+         # workaround since starmap doesn't work well with tqdm
+         return self._download_object(*args)
+
+     def _get_uid(self, item: pd.Series) -> str:
+         file_identifier = item["fileIdentifier"]
+         return file_identifier.split("/")[-1]
+
+     def uid_to_file_identifier(self, uid: str) -> str:
+         """Convert the uid to the file identifier.
+
+         Args:
+             uid (str): The uid of the object.
+
+         Returns:
+             The file identifier of the object.
+         """
+         return f"https://sketchfab.com/3d-models/{uid}"
+
+     def file_identifier_to_uid(self, file_identifier: str) -> str:
+         """Convert the file identifier to the uid.
+
+         Args:
+             file_identifier (str): The file identifier of the object.
+
+         Returns:
+             The uid of the object.
+         """
+         return file_identifier.split("/")[-1]
+
+     def download_objects(
+         self,
+         objects: pd.DataFrame,
+         download_dir: Optional[str] = "~/.objaverse",
+         processes: Optional[int] = None,
+         handle_found_object: Optional[Callable] = None,
+         handle_modified_object: Optional[Callable] = None,
+         handle_missing_object: Optional[Callable] = None,
+         **kwargs,
+     ) -> Dict[str, str]:
+         """Return the path to the object files for the given uids.
+
+         If the object is not already downloaded, it will be downloaded.
+
+         Args:
+             objects (pd.DataFrame): Objects to download. Must have columns for
+                 the object "fileIdentifier" and "sha256". Use the `load_annotations`
+                 function to get the metadata.
+             processes (int, optional): The number of processes to use to download
+                 the objects. Defaults to None.
+             download_dir (Optional[str], optional): The base directory to download the
+                 object to. Supports all file systems supported by fsspec. If None, the
+                 objects will be removed after downloading. Defaults to "~/.objaverse".
+
+         Returns:
+             A dictionary mapping the object fileIdentifier to the local path where
+             the object was downloaded.
+         """
+         hf_object_paths = self._load_object_paths(
+             download_dir=download_dir if download_dir is not None else "~/.objaverse"
+         )
+
+         # make a copy of the objects so we don't modify the original
+         objects = objects.copy()
+         objects["uid"] = objects.apply(self._get_uid, axis=1)
+         uids_to_sha256 = dict(zip(objects["uid"], objects["sha256"]))
+         uids_set = set(uids_to_sha256.keys())
+
+         # create a new df where the uids are the index
+         objects_uid_index = objects.set_index("uid")
+
+         out = {}
+         objects_to_download = []
+         if download_dir is None:
+             for _, item in objects.iterrows():
+                 uid = item["uid"]
+                 if uid not in hf_object_paths:
+                     logger.error(f"Could not find object with uid {uid}!")
+                     if handle_missing_object is not None:
+                         handle_missing_object(
+                             file_identifier=item["fileIdentifier"],
+                             sha256=item["sha256"],
+                             metadata=dict(),
+                         )
+                     continue
+                 objects_to_download.append(
+                     (item["fileIdentifier"], hf_object_paths[uid], item["sha256"])
+                 )
+         else:
+             versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
+             fs, path = fsspec.core.url_to_fs(versioned_dirname)
+
+             # Get the existing file paths. This is much faster than calling fs.exists() for each
+             # file. `glob()` is like walk, but returns a list of files instead of the nested
+             # directory structure. glob() is also faster than find() / walk() since it doesn't
+             # need to traverse the entire directory structure.
+             existing_file_paths = fs.glob(
+                 os.path.join(path, "glbs", "*", "*.glb"), refresh=True
+             )
+             existing_uids = {
+                 file.split("/")[-1].split(".")[0]
+                 for file in existing_file_paths
+                 if file.endswith(".glb")  # note partial files end with .glb.tmp
+             }
+
+             # add the existing downloaded uids to the return dict
+             already_downloaded_uids = uids_set.intersection(existing_uids)
+             for uid in already_downloaded_uids:
+                 hf_object_path = hf_object_paths[uid]
+                 fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
+                 out[self.uid_to_file_identifier(uid)] = fs_abs_object_path
+
+             logger.info(
+                 f"Found {len(already_downloaded_uids)} objects already downloaded"
+             )

+             # get the uids that need to be downloaded
+             remaining_uids = uids_set - existing_uids
+             for uid in remaining_uids:
+                 item = objects_uid_index.loc[uid]
+                 if uid not in hf_object_paths:
+                     logger.error(f"Could not find object with uid {uid}. Skipping it.")
+                     if handle_missing_object is not None:
+                         handle_missing_object(
+                             file_identifier=item["fileIdentifier"],
+                             sha256=item["sha256"],
+                             metadata=dict(),
+                         )
+                     continue
+                 objects_to_download.append(
+                     (item["fileIdentifier"], hf_object_paths[uid], item["sha256"])
+                 )

+         logger.info(f"Downloading {len(objects_to_download)} new objects")

+         # check if all objects are already downloaded
+         if len(objects_to_download) == 0:
+             return out

          args = [
+             (
+                 file_identifier,
+                 hf_object_path,
+                 download_dir,
+                 sha256,
+                 handle_found_object,
+                 handle_modified_object,
+                 handle_missing_object,
+             )
+             for file_identifier, hf_object_path, sha256 in objects_to_download
          ]

          # download the objects in parallel
+         with Pool(processes) as pool:
              new_object_downloads = list(
                  tqdm(
+                     pool.imap_unordered(self._parallel_download_object, args),
                      total=len(args),
                  )
              )

+         for file_identifier, local_path in new_object_downloads:
+             out[file_identifier] = local_path

+         return out

+     def load_lvis_annotations(
+         self,
+         download_dir: str = "~/.objaverse",
+     ) -> Dict[str, List[str]]:
+         """Load the LVIS annotations.

+         If the annotations are not already downloaded, they will be downloaded.

+         Args:
+             download_dir: The base directory to download the annotations to. Supports all
+                 file systems supported by fsspec. Defaults to "~/.objaverse".

+         Returns:
+             A dictionary mapping the LVIS category to the list of uids in that category.
+         """
+         hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"

+         download_path = os.path.join(
+             download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
+         )

+         # use fsspec
+         fs, path = fsspec.core.url_to_fs(download_path)
+         if not fs.exists(path):
+             # make dir if it doesn't exist
+             fs.makedirs(os.path.dirname(path), exist_ok=True)

+             # download the file
+             with fs.open(path, "wb") as f:
+                 with urllib.request.urlopen(hf_url) as response:
+                     f.write(response.read())

+         # load the gzip file
+         with fs.open(path, "rb") as f:
+             with gzip.GzipFile(fileobj=f) as gfile:
+                 content = gfile.read()
+         data = json.loads(content)

+         return data
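
With this change, Objaverse 1.0 downloads follow the same verified flow as the rest of Objaverse-XL: each file is fetched into a temporary directory, its SHA256 is checked against the annotation's expected hash, the matching handle_* callback fires, and the file is copied into download_dir only when one is given. A hypothetical usage sketch (it assumes load_annotations yields the "fileIdentifier" and "sha256" columns that download_objects requires, as its docstring states):

from objaverse_xl.objaverse_v1 import Sketchfab


def on_found(local_path, file_identifier, sha256, metadata):
    # fires for every object whose hash matches the annotations
    print(f"verified {file_identifier} -> {local_path}")


sketchfab = Sketchfab()
annotations = sketchfab.load_annotations(download_dir="~/.objaverse")

# download a small sample with 4 worker processes
paths = sketchfab.download_objects(
    objects=annotations.head(5),
    download_dir="~/.objaverse",
    processes=4,
    handle_found_object=on_found,
)

Passing download_dir=None would instead keep nothing on disk: objects exist only inside the temporary directory while the callbacks run, which suits pipelines where the callback itself does all the processing.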