| | import os, math, glob |
| | import datasets |
| | from datasets import Features, Value, Array1D |
| | from transformers import CLIPProcessor, CLIPModel |
| | import torch |
| | from PIL import Image |
| | from tqdm import tqdm |
| | import numpy as np |
| |
|
| | |
# Seed torch for reproducibility. CLIP inference in eval mode is
# deterministic, but this guards any stochastic ops that may sneak in.
torch.manual_seed(0)


# --- Configuration ---------------------------------------------------------
MODEL_NAME = "openai/clip-vit-base-patch32"  # CLIP checkpoint for image features
BATCH_SIZE = 32                              # images per CLIP forward pass
SHARD_SIZE = 10_000                          # rows per output parquet shard
OUT_DIR = "metmuseum_embeddings_streaming"   # directory the shards are written to
IMG_COL = "jpg"                              # dataset column holding the image
ID_COL = "Object ID"                         # dataset column holding the object id
| | |
| |
|
| | |
# Stream the Met open-access dataset so the full corpus is never
# materialized on disk or in memory.
ds_stream = datasets.load_dataset(
    "metmuseum/openaccess", split="train", streaming=True
)


# Load CLIP and its preprocessor; prefer the Apple-silicon GPU (MPS)
# when available, otherwise fall back to CPU. eval() disables dropout.
model = CLIPModel.from_pretrained(MODEL_NAME)
processor = CLIPProcessor.from_pretrained(MODEL_NAME)
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
model.to(device).eval()
| |
|
| | |
def l2_normalize(x, dim=-1, eps=1e-12):
    """Scale *x* so every slice along *dim* has unit L2 norm.

    The small *eps* keeps the division finite when a slice is all zeros
    (an all-zero slice stays all zeros instead of producing NaNs).
    """
    norms = x.norm(p=2, dim=dim, keepdim=True)
    return x / (norms + eps)
| |
|
| | |
os.makedirs(OUT_DIR, exist_ok=True)
shard_idx = 0        # index of the next parquet shard to be written
rows_in_shard = 0    # rows accumulated toward the current shard
buffer_ids = []      # pending object ids for the current shard
buffer_vecs = []     # pending embedding rows (float32 numpy vectors)
emb_dim = None       # embedding width; discovered from the first batch
| |
|
def flush_shard():
    """Write the buffered (id, embedding) rows to a parquet shard and reset.

    No-op when the buffer is empty. Side effects: writes one parquet file
    under OUT_DIR, advances ``shard_idx``, and clears the module-level
    row buffers and ``rows_in_shard`` counter.
    """
    global shard_idx, rows_in_shard, buffer_ids, buffer_vecs, emb_dim
    if not buffer_ids:
        return

    # Lazily infer the embedding width from the first buffered vector;
    # flush_batch() normally sets emb_dim first, but stay self-sufficient.
    if emb_dim is None:
        emb_dim = len(buffer_vecs[0])

    # BUG FIX: Array1D takes its shape as a 1-tuple, not a bare int —
    # datasets validates len(shape), so passing `emb_dim` directly raises.
    features = Features({
        ID_COL: Value("int32"),
        "Embedding": Array1D(shape=(emb_dim,), dtype="float32"),
    })
    shard_ds = datasets.Dataset.from_dict(
        {ID_COL: buffer_ids, "Embedding": buffer_vecs},
        features=features,
    )

    shard_path = os.path.join(OUT_DIR, f"part-{shard_idx:05d}.parquet")
    shard_ds.to_parquet(shard_path)

    shard_idx += 1
    rows_in_shard = 0
    buffer_ids = []
    buffer_vecs = []
| |
|
| | |
# Per-forward-pass accumulators, drained by flush_batch().
obj_ids_batch, images_batch = [], []
| |
|
def flush_batch():
    """Embed the pending image batch with CLIP and move results to the shard buffer.

    No-op when the batch is empty. Side effects: appends to the
    module-level ``buffer_ids``/``buffer_vecs``, bumps ``rows_in_shard``,
    records ``emb_dim`` on first use, and empties the batch lists in place.
    """
    global emb_dim, rows_in_shard, buffer_ids, buffer_vecs
    if not images_batch:
        return

    batch = processor(images=images_batch, return_tensors="pt")
    pixels = batch["pixel_values"].to(device)

    with torch.no_grad():
        embeddings = model.get_image_features(pixel_values=pixels)
        embeddings = l2_normalize(embeddings, dim=-1).cpu().numpy().astype("float32")

    if emb_dim is None:
        emb_dim = embeddings.shape[1]

    # One row per image: the integer object id plus its embedding vector.
    for oid, vec in zip(obj_ids_batch, embeddings):
        buffer_ids.append(int(oid))
        buffer_vecs.append(vec)
    rows_in_shard += embeddings.shape[0]

    # Clear in place so the module-level batch lists remain the same objects.
    obj_ids_batch.clear()
    images_batch.clear()
| |
|
| | |
# Walk the stream once, batching images for CLIP and cutting a parquet
# shard whenever enough rows have accumulated.
for record in tqdm(ds_stream, desc="Embedding (streaming)"):
    object_id = record.get(ID_COL)
    raw_image = record.get(IMG_COL)

    # Skip records missing either the id or the image.
    if object_id is None or raw_image is None:
        continue

    # Normalize to an RGB PIL image; the streamed column may arrive as a
    # PIL image or as array-like data — anything unconvertible is dropped.
    if isinstance(raw_image, Image.Image):
        rgb_image = raw_image.convert("RGB")
    else:
        try:
            rgb_image = Image.fromarray(raw_image).convert("RGB")
        except Exception:
            continue

    obj_ids_batch.append(object_id)
    images_batch.append(rgb_image)

    if len(images_batch) >= BATCH_SIZE:
        flush_batch()

    if rows_in_shard >= SHARD_SIZE:
        flush_shard()
| |
|
| | |
# Drain whatever partial batch and partial shard remain at end of stream.
flush_batch()
flush_shard()

print(f"Wrote {shard_idx} shard(s) to {OUT_DIR}")