#!/usr/bin/env python3
"""
Minimal high-level example: evaluate a pretrained timm model on CraterBench-R
using one global descriptor per image and cosine retrieval.
"""
from __future__ import annotations
import argparse
import json
from pathlib import Path
from typing import Any
import numpy as np
import timm
import torch
import torch.nn.functional as F
from PIL import Image
from timm.data import create_transform, resolve_model_data_config
from torch.utils.data import DataLoader, Dataset
class ImageDataset(Dataset):
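    """Yields ``(transformed_image, entry)`` pairs for a list of split entries.

    Each entry is a dict with at least a ``path`` relative to ``root``; the
    raw entry is passed through so metadata such as ``crater_id`` stays
    aligned with the extracted embeddings.
    """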
def __init__(self, entries: list[dict[str, Any]], root: Path, transform):
self.entries = entries
self.root = root
self.transform = transform
def __len__(self) -> int:
return len(self.entries)
def __getitem__(self, idx: int):
entry = self.entries[idx]
path = self.root / entry["path"]
image = Image.open(path).convert("RGB")
return self.transform(image), entry
def collate_with_entries(batch):
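    """Stack the image tensors but keep the metadata dicts as a plain list
    (the default collate would try to batch the dicts key by key)."""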
images = torch.stack([item[0] for item in batch])
entries = [item[1] for item in batch]
return images, entries
def unwrap_features(features: Any) -> torch.Tensor:
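    """Pull the primary feature tensor out of a ``forward_features`` output,
    which depending on the timm model may be a tensor, list/tuple, or dict."""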
if isinstance(features, torch.Tensor):
return features
if isinstance(features, (list, tuple)):
return unwrap_features(features[0])
if isinstance(features, dict):
for key in ("x", "features", "last_hidden_state"):
if key in features:
return unwrap_features(features[key])
for value in features.values():
if isinstance(value, torch.Tensor):
return value
raise TypeError(f"Unsupported feature output type: {type(features)!r}")
def pool_features(features: torch.Tensor, pool: str) -> torch.Tensor:
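    """Reduce (B, D), (B, N, D) token, or (B, C, H, W) feature maps to a
    single (B, D) descriptor. ``cls`` assumes token 0 is the class token,
    which holds for most timm ViTs but not for every architecture."""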
if features.ndim == 2:
return features
if features.ndim == 3:
if pool == "cls":
return features[:, 0]
if pool == "mean":
return features.mean(dim=1)
if pool == "max":
return features.max(dim=1).values
if features.ndim == 4:
if pool == "mean":
return features.mean(dim=(2, 3))
if pool == "max":
return features.amax(dim=(2, 3))
raise ValueError(f"Unsupported feature shape {tuple(features.shape)} for pool={pool}")
def extract_embeddings(
model,
loader: DataLoader,
device: torch.device,
pool: str,
) -> tuple[np.ndarray, list[dict[str, Any]]]:
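    """Embed every image in ``loader`` and return (N, D) float32 descriptors,
    L2-normalized so that dot products equal cosine similarities, together
    with the split entries in loader order."""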
all_embeddings = []
all_entries: list[dict[str, Any]] = []
model.eval()
with torch.no_grad():
for images, entries in loader:
images = images.to(device)
features = unwrap_features(model.forward_features(images))
pooled = pool_features(features, pool)
pooled = F.normalize(pooled, dim=1)
all_embeddings.append(pooled.cpu().numpy().astype(np.float32))
all_entries.extend(entries)
return np.concatenate(all_embeddings, axis=0), all_entries
def compute_metrics(
ranking: np.ndarray,
query_ids: list[str],
gallery_ids: list[str],
ground_truth: dict[str, list[str]],
k_values: list[int],
) -> dict[str, float]:
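    """Score a ranking of gallery indices against per-query ground truth.

    recall@k is the fraction of a query's acceptable ids found in its top k
    (duplicates of a crater_id count once). mAP and MRR only see the top
    ``ranking.shape[1]`` results, so with a depth-10 ranking they are
    effectively mAP@10 / MRR@10; AP is averaged over the correct items
    actually retrieved. Assumes every query has at least one acceptable id.
    """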
metrics: dict[str, float] = {}
max_k = ranking.shape[1]
for k in k_values:
recalls = []
for row, query_id in enumerate(query_ids):
acceptable = set(ground_truth[query_id])
retrieved = [gallery_ids[idx] for idx in ranking[row, :k]]
unique_correct = set(retrieved) & acceptable
recalls.append(min(len(unique_correct) / len(acceptable), 1.0))
metrics[f"recall@{k}"] = float(np.mean(recalls))
aps = []
reciprocal_ranks = []
for row, query_id in enumerate(query_ids):
acceptable = set(ground_truth[query_id])
retrieved = [gallery_ids[idx] for idx in ranking[row, :max_k]]
seen = set()
precision_at_k = []
rr = 0.0
for rank, crater_id in enumerate(retrieved, start=1):
if crater_id in acceptable and crater_id not in seen:
seen.add(crater_id)
precision_at_k.append(len(seen) / rank)
if rr == 0.0:
rr = 1.0 / rank
aps.append(float(np.mean(precision_at_k)) if precision_at_k else 0.0)
reciprocal_ranks.append(rr)
metrics["map"] = float(np.mean(aps))
metrics["mrr"] = float(np.mean(reciprocal_ranks))
return metrics
def search_topk(
query_embeddings: np.ndarray,
gallery_embeddings: np.ndarray,
topk: int,
device: torch.device,
query_chunk_size: int,
) -> np.ndarray:
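    """Chunked cosine (dot-product) search: returns an (n_queries, topk)
    array of gallery indices, scoring queries in chunks to bound memory."""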
    # Both sides are L2-normalized, so dot products are cosine similarities.
    gallery = torch.from_numpy(gallery_embeddings).to(device)
    k = min(topk, gallery.shape[0])  # guard against a gallery smaller than topk
    results = []
    # Score queries in chunks so the (chunk x gallery) score matrix stays small.
    for start in range(0, len(query_embeddings), query_chunk_size):
        end = min(start + query_chunk_size, len(query_embeddings))
        query = torch.from_numpy(query_embeddings[start:end]).to(device)
        scores = query @ gallery.T
        indices = torch.topk(scores, k=k, dim=1).indices.cpu().numpy()
        results.append(indices)
    return np.concatenate(results, axis=0)
def main() -> None:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--data-root", type=Path, default=Path("."),
                        help="dataset root containing splits/<split>.json and the images")
    parser.add_argument("--split", type=str, default="test", help="split name")
    parser.add_argument("--model", type=str, required=True, help="timm model name")
    parser.add_argument("--pool", type=str, default="mean", choices=["cls", "mean", "max"],
                        help="how to pool token/spatial features into one global descriptor")
    parser.add_argument("--batch-size", type=int, default=64)
    parser.add_argument("--query-chunk-size", type=int, default=256,
                        help="queries scored against the gallery per chunk (bounds memory)")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
    args = parser.parse_args()
split_path = args.data_root / "splits" / f"{args.split}.json"
with split_path.open("r") as handle:
split = json.load(handle)
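    # Manifest shape (inferred from how it is read below):
    #   {
    #     "gallery_images": [{"path": "...", "crater_id": "..."}, ...],
    #     "query_images":   [{"path": "...", "crater_id": "..."}, ...],
    #     "ground_truth":   {"<query crater_id>": ["<gallery crater_id>", ...]}
    #   }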
    device = torch.device(args.device)
    # num_classes=0 strips the classifier head; only backbone features are used.
    model = timm.create_model(args.model, pretrained=True, num_classes=0)
    model.to(device)
    # Reuse the model's own preprocessing (input size, interpolation, mean/std).
    data_config = resolve_model_data_config(model)
    transform = create_transform(**data_config, is_training=False)
    def make_loader(entries):
        return DataLoader(
            ImageDataset(entries, args.data_root, transform),
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4,
            pin_memory=True,
            collate_fn=collate_with_entries,
        )

    gallery_loader = make_loader(split["gallery_images"])
    query_loader = make_loader(split["query_images"])
    gallery_embeddings, gallery_entries = extract_embeddings(
        model, gallery_loader, device, args.pool
    )
    query_embeddings, query_entries = extract_embeddings(
        model, query_loader, device, args.pool
    )
    # Retrieve the top-10 neighbors per query; 10 is the largest k evaluated
    # below, so deeper ranks are never scored.
    ranking = search_topk(
        query_embeddings,
        gallery_embeddings,
        topk=10,
        device=device,
        query_chunk_size=args.query_chunk_size,
    )
gallery_ids = [entry["crater_id"] for entry in gallery_entries]
query_ids = [entry["crater_id"] for entry in query_entries]
metrics = compute_metrics(ranking, query_ids, gallery_ids, split["ground_truth"], [1, 5, 10])
print("Model:", args.model)
print("Pool:", args.pool)
for key, value in metrics.items():
print(f"{key}: {value:.4f}")
if __name__ == "__main__":
main()