mattdeitke committed · Commit 11fa1d5 · Parent(s): 24cab06

add updated smithsonian support
Files changed:
- objaverse_xl/__init__.py +108 -6
- requirements.txt +4 -1
objaverse_xl/__init__.py CHANGED

@@ -1,22 +1,42 @@
+import multiprocessing
 import os
+import uuid
+from functools import partial
+from multiprocessing import Pool
+from typing import Dict, List, Optional

 import pandas as pd
 import requests
+from loguru import logger
+from tqdm import tqdm


-def load_smithsonian_df(
-    download_dir: str = "~/.objaverse",
+def get_uid_from_str(string: str) -> str:
+    """Generates a UUID from a string.
+
+    Args:
+        string (str): String to generate a UUID from.
+
+    Returns:
+        str: UUID generated from the string.
+    """
+    namespace = uuid.NAMESPACE_DNS
+    return str(uuid.uuid5(namespace, string))
+
+
+def load_smithsonian_metadata(
+    download_dir: str = "~/.objaverse-xl",
 ) -> pd.DataFrame:
     """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.

     Args:
         download_dir (str, optional): Directory to download the parquet metadata file.
-            Defaults to "~/.objaverse".
+            Defaults to "~/.objaverse-xl".

     Returns:
         pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
-            columns for the object "title", "url", "quality", and "file_type".
-            The quality is always Medium and the file_type is always glb.
+            columns for the object "title", "url", "quality", "file_type", "uid", and
+            "license". The quality is always Medium and the file_type is always glb.
     """
     dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian"))
     os.makedirs(dirname, exist_ok=True)
@@ -28,4 +48,86 @@ def load_smithsonian_df(
     with open(filename, "wb") as file:
         file.write(response.content)

-    return pd.read_parquet(filename)
+    df = pd.read_parquet(filename)
+    df["uid"] = df["url"].apply(get_uid_from_str)
+    df["license"] = "CC0"
+    return df
+
+
+def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse-xl") -> str:
+    """Downloads a Smithsonian Object from a URL.
+
+    Args:
+        url (str): URL to download the Smithsonian Object from.
+        download_dir (str, optional): Directory to download the Smithsonian Object to.
+            Defaults to "~/.objaverse-xl".
+
+    Returns:
+        str: Path to the downloaded Smithsonian Object.
+    """
+    uid = get_uid_from_str(url)
+
+    dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian", "objects"))
+    os.makedirs(dirname, exist_ok=True)
+    filename = os.path.join(dirname, f"{uid}.glb")
+
+    if os.path.exists(filename):
+        return filename
+
+    tmp_path = os.path.join(dirname, f"{uid}.glb.tmp")
+    response = requests.get(url)
+    if response.status_code == 404:
+        logger.warning(f"404 for {url}")
+        return None
+    with open(tmp_path, "wb") as file:
+        for chunk in response.iter_content(chunk_size=8192):
+            file.write(chunk)
+    os.rename(tmp_path, filename)
+
+    return filename
+
+
+def download_smithsonian_objects(
+    urls: Optional[str] = None,
+    processes: Optional[int] = None,
+    download_dir: str = "~/.objaverse-xl",
+) -> List[Dict[str, str]]:
+    """Downloads all Smithsonian Objects.
+
+    Args:
+        urls (Optional[str], optional): List of URLs to download the Smithsonian Objects
+            from. If None, all Smithsonian Objects will be downloaded. Defaults to None.
+        processes (Optional[int], optional): Number of processes to use for downloading
+            the Smithsonian Objects. If None, the number of processes will be set to the
+            number of CPUs on the machine (multiprocessing.cpu_count()). Defaults to None.
+        download_dir (str, optional): Directory to download the Smithsonian Objects to.
+            Defaults to "~/.objaverse-xl".
+
+    Returns:
+        List[Dict[str, str]]: List of dictionaries with keys "download_path" and "url"
+            for each downloaded object.
+    """
+    if processes is None:
+        processes = multiprocessing.cpu_count()
+    if urls is None:
+        df = load_smithsonian_metadata(download_dir=download_dir)
+        urls = df["url"].tolist()
+
+    logger.info(f"Downloading {len(urls)} Smithsonian Objects with {processes=}")
+    with Pool(processes=processes) as pool:
+        results = list(
+            tqdm(
+                pool.imap_unordered(
+                    partial(download_smithsonian_object, download_dir=download_dir),
+                    urls,
+                ),
+                total=len(urls),
+                desc="Downloading Smithsonian Objects",
+            )
+        )
+    out = [
+        {"download_path": download_path, "url": url}
+        for download_path, url in zip(results, urls)
+        if download_path is not None
+    ]
+    return out
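For orientation, here is a minimal usage sketch of the API added above. It is not part of the commit: the URL in it is a placeholder, and it assumes the `objaverse_xl` package from this repo is importable with its dependencies installed. One caveat in the committed code: `pool.imap_unordered` yields results in completion order, so zipping `results` against `urls` in `download_smithsonian_objects` can pair a download path with the wrong URL; `pool.imap` would keep the pairing stable.

```python
# Hypothetical usage sketch (not part of the commit). Assumes objaverse_xl
# is importable; the URL below is a placeholder, not a real Smithsonian asset.
from objaverse_xl import (
    download_smithsonian_objects,
    get_uid_from_str,
    load_smithsonian_metadata,
)

# uuid5 over NAMESPACE_DNS is deterministic: the same URL always maps to the
# same uid, so downloaded files get stable names across runs and machines.
assert get_uid_from_str("https://example.com/object.glb") == get_uid_from_str(
    "https://example.com/object.glb"
)

if __name__ == "__main__":  # guard needed for multiprocessing on spawn platforms
    # The first call downloads the parquet file; later calls reuse the cached copy.
    df = load_smithsonian_metadata(download_dir="~/.objaverse-xl")

    # Download a small sample of objects with two worker processes.
    results = download_smithsonian_objects(
        urls=df["url"].head(3).tolist(),
        processes=2,
    )
    for item in results:
        print(item["url"], "->", item["download_path"])
```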
requirements.txt CHANGED

@@ -1,2 +1,5 @@
+requests
 pandas
-pyarrow
+pyarrow
+tqdm
+loguru
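Dependency note: `requests` was already imported by the module but was previously missing from requirements.txt; this commit adds it along with `tqdm` and `loguru` for the new download progress bar and logging. None of the entries are version-pinned, so `pip install -r requirements.txt` installs the latest release of each.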