#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "transformers@git+https://github.com/huggingface/transformers.git@1fba72361e8e0e865d569f7cd15e5aa50b41ac9a",
# "datasets",
# "huggingface-hub",
# "pillow",
# "tqdm",
# "torchvision",
# "accelerate",
# ]
# ///
"""
Detect objects in images using Meta's SAM3 (Segment Anything Model 3).
This script processes images from a HuggingFace dataset and detects a single object
type based on a text prompt, outputting bounding boxes in HuggingFace object detection format.
Examples:
# Detect photographs in historical newspapers
uv run detect-objects.py \\
davanstrien/newspapers-with-images-after-photography \\
my-username/newspapers-detected \\
--class-name photograph
# Detect animals in camera trap images
uv run detect-objects.py \\
wildlife-images \\
wildlife-detected \\
--class-name animal \\
--confidence-threshold 0.6
# Test on small subset
uv run detect-objects.py input output \\
--class-name table \\
--max-samples 10
# Run on HF Jobs with L4 GPU
hf jobs uv run --flavor l4x1 \\
-s HF_TOKEN=$HF_TOKEN \\
https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\
input-dataset output-dataset \\
--class-name photograph \\
--confidence-threshold 0.5
Performance:
- L4 GPU: ~2-4 images/sec (depending on image size and batch size)
- Memory: ~8-12 GB VRAM
- Recommended batch size: 4-8 for L4, 8-16 for A10
Note: To detect multiple object types, run the script multiple times with different
--class-name values and merge the results.
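
A minimal merge sketch (hypothetical dataset IDs; assumes both runs processed
the same input in the same order, i.e. no --shuffle, so rows align by index):

    from datasets import ClassLabel, Sequence, Value, load_dataset

    photos = load_dataset("user/out-photograph", split="train")
    tables = load_dataset("user/out-table", split="train")

    features = photos.features.copy()
    features["objects"] = {
        "bbox": Sequence(Sequence(Value("float32"), length=4)),
        "category": Sequence(ClassLabel(names=["photograph", "table"])),
        "score": Sequence(Value("float32")),
    }

    def merge(row, idx):
        a, b = row["objects"], tables[idx]["objects"]
        return {"objects": {
            "bbox": a["bbox"] + b["bbox"],
            # the second run's single class (index 0) becomes index 1 here
            "category": a["category"] + [c + 1 for c in b["category"]],
            "score": a["score"] + b["score"],
        }}

    merged = photos.map(merge, with_indices=True, features=features)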
"""
import argparse
import logging
import os
import sys
from typing import Any, Dict, List
import torch
from datasets import ClassLabel, Dataset, Features, Sequence, Value, load_dataset
from datasets import Image as ImageFeature
from huggingface_hub import HfApi, login
from PIL import Image
from tqdm.auto import tqdm
from transformers import Sam3Model, Sam3Processor
os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%H:%M:%S",
)
logger = logging.getLogger(__name__)
# GPU availability check
if not torch.cuda.is_available():
logger.error("❌ CUDA is not available. This script requires a GPU.")
logger.error("For local testing, ensure you have a CUDA-capable GPU.")
logger.error("For cloud execution, use HF Jobs with --flavor l4x1 or similar.")
sys.exit(1)
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description="Detect objects in images using SAM3",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=__doc__,
)
# Required arguments
parser.add_argument(
"input_dataset", help="Input HuggingFace dataset ID (e.g., 'username/dataset')"
)
parser.add_argument(
"output_dataset", help="Output HuggingFace dataset ID (e.g., 'username/output')"
)
# Object detection configuration
parser.add_argument(
"--class-name",
required=True,
help="Object class to detect (e.g., 'photograph', 'animal', 'table')",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum confidence score for detections (default: 0.5)",
)
parser.add_argument(
"--mask-threshold",
type=float,
default=0.5,
help="Threshold for mask generation (default: 0.5)",
)
# Dataset configuration
parser.add_argument(
"--image-column",
default="image",
help="Name of the column containing images (default: 'image')",
)
parser.add_argument(
"--split", default="train", help="Dataset split to process (default: 'train')"
)
parser.add_argument(
"--max-samples",
type=int,
default=None,
help="Maximum number of samples to process (for testing)",
)
parser.add_argument(
"--shuffle", action="store_true", help="Shuffle dataset before processing"
)
# Processing configuration
parser.add_argument(
"--batch-size",
type=int,
default=4,
help="Batch size for processing (default: 4)",
)
parser.add_argument(
"--model",
default="facebook/sam3",
help="SAM3 model ID (default: 'facebook/sam3')",
)
parser.add_argument(
"--dtype",
default="bfloat16",
choices=["float32", "float16", "bfloat16"],
help="Model precision (default: 'bfloat16')",
)
# Output configuration
parser.add_argument(
"--private", action="store_true", help="Make output dataset private"
)
parser.add_argument(
"--hf-token",
default=None,
help="HuggingFace token (default: uses HF_TOKEN env var or cached token)",
)
return parser.parse_args()
def load_and_validate_dataset(
dataset_id: str,
split: str,
image_column: str,
    max_samples: int | None = None,
    shuffle: bool = False,
    hf_token: str | None = None,
) -> Dataset:
"""Load dataset and validate it has the required image column."""
logger.info(f"πŸ“‚ Loading dataset: {dataset_id} (split: {split})")
try:
dataset = load_dataset(dataset_id, split=split, token=hf_token)
except Exception as e:
logger.error(f"Failed to load dataset '{dataset_id}': {e}")
sys.exit(1)
# Validate image column exists
if image_column not in dataset.column_names:
logger.error(f"Column '{image_column}' not found in dataset")
logger.error(f"Available columns: {dataset.column_names}")
sys.exit(1)
# Shuffle if requested
if shuffle:
logger.info("πŸ”€ Shuffling dataset")
dataset = dataset.shuffle()
# Limit samples if requested
if max_samples is not None:
logger.info(f"πŸ”’ Limiting to {max_samples} samples")
dataset = dataset.select(range(min(max_samples, len(dataset))))
logger.info(f"βœ… Loaded {len(dataset)} samples")
return dataset
def process_batch(
batch: Dict[str, List[Any]],
image_column: str,
class_name: str,
processor: Sam3Processor,
model: Sam3Model,
confidence_threshold: float,
mask_threshold: float,
) -> Dict[str, List[List[Dict[str, Any]]]]:
"""Process a batch of images and return detections for a single class."""
images = batch[image_column]
# Convert to PIL Images and ensure RGB
pil_images = []
for img in images:
if isinstance(img, str):
img = Image.open(img)
if img.mode == "L":
img = img.convert("RGB")
elif img.mode != "RGB":
img = img.convert("RGB")
pil_images.append(img)
# Process batch through model
try:
inputs = processor(
images=pil_images,
text=[class_name] * len(pil_images), # Same prompt for all images
return_tensors="pt",
).to(model.device, dtype=model.dtype)
with torch.no_grad():
outputs = model(**inputs)
# Post-process outputs using original_sizes from processor
results = processor.post_process_instance_segmentation(
outputs,
threshold=confidence_threshold,
mask_threshold=mask_threshold,
target_sizes=inputs.get("original_sizes").tolist(),
)
except Exception as e:
logger.warning(f"⚠️ Failed to process batch: {e}")
# Return empty detections for all images in batch
return {
"objects": [
{"bbox": [], "category": [], "score": []}
for _ in range(len(pil_images))
]
}
# Convert to HuggingFace object detection format (dict-of-lists per image)
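    # e.g. one image -> {"bbox": [[x, y, w, h], ...], "category": [0, ...], "score": [0.93, ...]}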
batch_objects = []
for result in results:
boxes = result.get("boxes", torch.tensor([]))
scores = result.get("scores", torch.tensor([]))
# Handle empty results
if len(boxes) == 0:
batch_objects.append({"bbox": [], "category": [], "score": []})
continue
# Build lists for this image
image_bboxes = []
image_categories = []
image_scores = []
for box, score in zip(boxes.cpu().numpy(), scores.cpu().numpy()):
x1, y1, x2, y2 = box
width = x2 - x1
height = y2 - y1
image_bboxes.append([float(x1), float(y1), float(width), float(height)])
image_categories.append(0) # Single class, always index 0
image_scores.append(float(score))
batch_objects.append(
{
"bbox": image_bboxes,
"category": image_categories,
"score": image_scores,
}
)
return {"objects": batch_objects}
def main():
args = parse_args()
class_name = args.class_name.strip()
if not class_name:
logger.error("❌ Invalid --class-name argument. Provide a class name.")
sys.exit(1)
logger.info("πŸš€ SAM3 Object Detection")
logger.info(f" Input: {args.input_dataset}")
logger.info(f" Output: {args.output_dataset}")
logger.info(f" Class: {class_name}")
logger.info(f" Confidence threshold: {args.confidence_threshold}")
logger.info(f" Batch size: {args.batch_size}")
# Authentication
if args.hf_token:
login(token=args.hf_token)
elif os.getenv("HF_TOKEN"):
login(token=os.getenv("HF_TOKEN"))
# Load dataset
dataset = load_and_validate_dataset(
args.input_dataset,
args.split,
args.image_column,
args.max_samples,
args.shuffle,
args.hf_token,
)
# Load model
logger.info(f"πŸ€– Loading SAM3 model: {args.model}")
try:
processor = Sam3Processor.from_pretrained(args.model)
model = Sam3Model.from_pretrained(
args.model, torch_dtype=getattr(torch, args.dtype), device_map="auto"
)
logger.info(f"βœ… Model loaded on {model.device}")
except Exception as e:
logger.error(f"❌ Failed to load model: {e}")
logger.error("Ensure the model exists and you have access permissions")
sys.exit(1)
# Define output schema before processing (dict-of-lists format for object detection)
logger.info("πŸ“Š Creating output schema...")
new_features = dataset.features.copy()
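    # Single-class run: every detection stores category index 0, which the
    # ClassLabel below maps back to class_name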
new_features["objects"] = {
"bbox": Sequence(Sequence(Value("float32"), length=4)),
"category": Sequence(ClassLabel(names=[class_name])),
"score": Sequence(Value("float32")),
}
# Process dataset with explicit output features
logger.info("πŸ” Processing images...")
processed_dataset = dataset.map(
lambda batch: process_batch(
batch,
args.image_column,
class_name,
processor,
model,
args.confidence_threshold,
args.mask_threshold,
),
batched=True,
batch_size=args.batch_size,
features=new_features,
desc="Detecting objects",
)
# Calculate statistics
    # Each "objects" entry is a dict of lists, so count bboxes; len(objs) would
    # just count the dict's three keys
    total_detections = sum(
        len(objs["bbox"]) for objs in processed_dataset["objects"]
    )
    images_with_detections = sum(
        len(objs["bbox"]) > 0 for objs in processed_dataset["objects"]
    )
logger.info("βœ… Detection complete!")
logger.info(f" Total detections: {total_detections}")
logger.info(
f" Images with detections: {images_with_detections}/{len(processed_dataset)}"
)
logger.info(
f" Average detections per image: {total_detections / len(processed_dataset):.2f}"
)
# Push to hub
logger.info(f"πŸ“€ Pushing to HuggingFace Hub: {args.output_dataset}")
try:
processed_dataset.push_to_hub(args.output_dataset, private=args.private)
logger.info(
f"βœ… Dataset available at: https://huggingface.co/datasets/{args.output_dataset}"
)
except Exception as e:
logger.error(f"❌ Failed to push to hub: {e}")
logger.info("πŸ’Ύ Saving locally as backup...")
processed_dataset.save_to_disk("./output_dataset")
logger.info("βœ… Saved to ./output_dataset")
sys.exit(1)
if __name__ == "__main__":
main()