import multiprocessing
import os
from functools import partial
from multiprocessing import Pool
from typing import Dict, List, Optional, Tuple

import fsspec
import pandas as pd
import requests
from loguru import logger
from tqdm import tqdm

from utils import get_uid_from_str


def load_smithsonian_metadata(download_dir: str = "~/.objaverse") -> pd.DataFrame:
    """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.

    Args:
        download_dir (str, optional): Directory to download the parquet metadata file.
            Supports all file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
            columns for the object "title", "url", "quality", "file_type", "uid", and
            "license". The quality is always Medium and the file_type is always glb.
    """
    filename = os.path.join(download_dir, "smithsonian", "object-metadata.parquet")
    fs, path = fsspec.core.url_to_fs(filename)
    fs.makedirs(os.path.dirname(path), exist_ok=True)
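
    # Download the parquet file from Hugging Face on the first call and cache
    # it locally; subsequent calls reuse the cached copy.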
    if not fs.exists(path):
        url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
        response = requests.get(url)
        response.raise_for_status()
        with fs.open(path, "wb") as file:
            file.write(response.content)

    with fs.open(path) as f:
        df = pd.read_parquet(f)
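
    # Derive a deterministic uid for each object from its URL and record the
    # CC0 license that applies to every object in this dataset.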
    df["uid"] = df["url"].apply(get_uid_from_str)
    df["license"] = "CC0"

    return df
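

# Example usage, as a minimal sketch (the first call needs network access to
# fetch the parquet metadata file from Hugging Face):
#
#   df = load_smithsonian_metadata()
#   print(df[["uid", "url", "license"]].head())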


def _download_smithsonian_object(
    url: str, download_dir: str = "~/.objaverse"
) -> Tuple[str, Optional[str]]:
    """Downloads a Smithsonian Object from a URL.

    Overwrites the file if it already exists and assumes this was previously
    checked.

    Args:
        url (str): URL to download the Smithsonian Object from.
        download_dir (str, optional): Directory to download the Smithsonian Object to.
            Supports all file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        Tuple[str, Optional[str]]: Tuple of the URL and the path to the downloaded
            Smithsonian Object. If the Smithsonian Object was not downloaded, the path
            will be None.
    """
    uid = get_uid_from_str(url)

    filename = os.path.join(download_dir, "smithsonian", "objects", f"{uid}.glb")
    fs, path = fsspec.core.url_to_fs(filename)

    response = requests.get(url, stream=True)

    if response.status_code == 404:
        logger.warning(f"404 for {url}")
        return url, None
    response.raise_for_status()
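
    # Stream the response to a temporary file and rename it into place at the
    # end, so a partially downloaded file is never left at the final path.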
    tmp_path = f"{path}.tmp"
    with fs.open(tmp_path, "wb") as file:
        for chunk in response.iter_content(chunk_size=8192):
            file.write(chunk)

    fs.rename(tmp_path, path)

    return url, filename


def download_smithsonian_objects(
    urls: Optional[List[str]] = None,
    processes: Optional[int] = None,
    download_dir: str = "~/.objaverse",
) -> List[Dict[str, str]]:
    """Downloads all Smithsonian Objects.

    Args:
        urls (Optional[List[str]], optional): List of URLs to download the Smithsonian
            Objects from. If None, all Smithsonian Objects will be downloaded. Defaults
            to None.
        processes (Optional[int], optional): Number of processes to use for downloading
            the Smithsonian Objects. If None, the number of processes will be set to
            the number of CPUs on the machine (multiprocessing.cpu_count()). Defaults
            to None.
        download_dir (str, optional): Directory to download the Smithsonian Objects to.
            Supports all file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        List[Dict[str, str]]: List of dictionaries with keys "download_path" and "url"
            for each downloaded object.
    """
    if processes is None:
        processes = multiprocessing.cpu_count()
    if urls is None:
        df = load_smithsonian_metadata(download_dir=download_dir)
        urls = df["url"].tolist()

    objects_dir = os.path.join(download_dir, "smithsonian", "objects")
    fs, path = fsspec.core.url_to_fs(objects_dir)
    fs.makedirs(path, exist_ok=True)
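
    # List the .glb files already on disk so previously downloaded objects can
    # be skipped.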
    existing_glb_files = fs.glob(os.path.join(objects_dir, "*.glb"), refresh=True)
    existing_uids = {
        os.path.basename(file).split(".")[0] for file in existing_glb_files
    }
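
    # Partition the requested URLs into those that still need to be fetched and
    # those that already have a local file.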
    out = []
    urls_to_download = set()
    already_downloaded_urls = set()
    for url in urls:
        uid = get_uid_from_str(url)
        if uid not in existing_uids:
            urls_to_download.add(url)
        else:
            already_downloaded_urls.add(url)
            out.append(
                {"download_path": os.path.join(objects_dir, f"{uid}.glb"), "url": url}
            )

    logger.info(
        f"Found {len(already_downloaded_urls)} Smithsonian Objects already downloaded"
    )
    logger.info(
        f"Downloading {len(urls_to_download)} Smithsonian Objects with {processes=}"
    )

    if len(urls_to_download) == 0:
        return out
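
    # Fan the remaining downloads out across a process pool, tracking progress
    # with tqdm as results arrive in completion order.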
    with Pool(processes=processes) as pool:
        results = list(
            tqdm(
                pool.imap_unordered(
                    partial(_download_smithsonian_object, download_dir=download_dir),
                    urls_to_download,
                ),
                total=len(urls_to_download),
                desc="Downloading Smithsonian Objects",
            )
        )

    out.extend(
        [
            {"download_path": download_path, "url": url}
            for url, download_path in results
            if download_path is not None
        ]
    )

    return out
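

if __name__ == "__main__":
    # Example sketch: fetch the metadata, download a few objects, and print
    # where they landed. Assumes network access; the slice size is arbitrary.
    sample_df = load_smithsonian_metadata()
    sample_results = download_smithsonian_objects(urls=sample_df["url"].tolist()[:5])
    for obj in sample_results:
        print(obj["url"], "->", obj["download_path"])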