davanstrien (HF Staff) committed
Commit 2063000 · 1 Parent(s): 6ba36e4

Add dataset card creation and visualization script for object detection results

Files changed (2):
  1. detect-objects.py +222 -12
  2. visualize-detections.py +241 -0
detect-objects.py CHANGED
@@ -37,19 +37,14 @@ Examples:
         --class-name table \\
         --max-samples 10
 
-    # Run on HF Jobs with L4 GPU
-    hf jobs uv run --flavor l4x1 \\
-        -s HF_TOKEN=$HF_TOKEN \\
+    # Run on HF Jobs with GPU
+    hf jobs uv run --flavor a100-large \\
+        -s HF_TOKEN=$HF_TOKEN \\
         https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\
         input-dataset output-dataset \\
         --class-name photograph \\
         --confidence-threshold 0.5
 
-    Performance:
-    - L4 GPU: ~2-4 images/sec (depending on image size and batch size)
-    - Memory: ~8-12 GB VRAM
-    - Recommended batch size: 4-8 for L4, 8-16 for A10
-
     Note: To detect multiple object types, run the script multiple times with different
     --class-name values and merge the results.
 """
@@ -58,12 +53,13 @@ import argparse
 import logging
 import os
 import sys
+import time
 from typing import Any, Dict, List
 
 import torch
 from datasets import ClassLabel, Dataset, Features, Sequence, Value, load_dataset
 from datasets import Image as ImageFeature
-from huggingface_hub import HfApi, login
+from huggingface_hub import DatasetCard, HfApi, login
 from PIL import Image
 from tqdm.auto import tqdm
 from transformers import Sam3Model, Sam3Processor
@@ -171,6 +167,196 @@ def parse_args():
     return parser.parse_args()
 
 
+def create_dataset_card(
+    source_dataset: str,
+    model: str,
+    class_name: str,
+    num_samples: int,
+    total_detections: int,
+    images_with_detections: int,
+    processing_time: str,
+    confidence_threshold: float,
+    mask_threshold: float,
+    batch_size: int,
+    dtype: str,
+    image_column: str = "image",
+    split: str = "train",
+) -> str:
+    """Create a dataset card documenting the object detection process."""
+    from datetime import datetime, timezone
+
+    model_name = model.split("/")[-1]
+    avg_detections = total_detections / num_samples if num_samples > 0 else 0
+    detection_rate = (
+        (images_with_detections / num_samples * 100) if num_samples > 0 else 0
+    )
+    # processing_time is always formatted as "<minutes> minutes" by main(),
+    # so it can be parsed back to derive an images/second rate.
+    elapsed_minutes = float(processing_time.split()[0])
+    images_per_second = (
+        num_samples / (elapsed_minutes * 60) if elapsed_minutes > 0 else 0.0
+    )
+
+    return f"""---
+tags:
+- object-detection
+- sam3
+- segment-anything
+- bounding-boxes
+- uv-script
+- generated
+---
+
+# Object Detection: {class_name.title()} Detection using {model_name}
+
+This dataset contains object detection results (bounding boxes) for **{class_name}** detected in images from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using Meta's SAM3 (Segment Anything Model 3).
+
+**Generated using**: [uv-scripts/sam3](https://huggingface.co/datasets/uv-scripts/sam3) detection script
+
+## Detection Statistics
+
+- **Objects Detected**: {class_name}
+- **Total Detections**: {total_detections:,}
+- **Images with Detections**: {images_with_detections:,} / {num_samples:,} ({detection_rate:.1f}%)
+- **Average Detections per Image**: {avg_detections:.2f}
+
+## Processing Details
+
+- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
+- **Model**: [{model}](https://huggingface.co/{model})
+- **Script Repository**: [uv-scripts/sam3](https://huggingface.co/datasets/uv-scripts/sam3)
+- **Number of Samples Processed**: {num_samples:,}
+- **Processing Time**: {processing_time}
+- **Processing Date**: {datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")}
+
+### Configuration
+
+- **Image Column**: `{image_column}`
+- **Dataset Split**: `{split}`
+- **Class Name**: `{class_name}`
+- **Confidence Threshold**: {confidence_threshold}
+- **Mask Threshold**: {mask_threshold}
+- **Batch Size**: {batch_size}
+- **Model Dtype**: {dtype}
+
+## Model Information
+
+SAM3 (Segment Anything Model 3) is Meta's state-of-the-art object detection and segmentation model that excels at:
+
+- 🎯 **Zero-shot detection** - Detect objects using natural language prompts
+- 📦 **Bounding boxes** - Accurate object localization
+- 🎭 **Instance segmentation** - Pixel-perfect masks (not included in this dataset)
+- 🖼️ **Any image domain** - Works on photos, documents, medical images, etc.
+
+This dataset uses SAM3 in text-prompted detection mode to find instances of "{class_name}" in the source images.
+
+## Dataset Structure
+
+The dataset contains all original columns from the source dataset plus an `objects` column with detection results in HuggingFace object detection format (dict-of-lists):
+
+- **bbox**: List of bounding boxes in `[x, y, width, height]` format (pixel coordinates)
+- **category**: List of category indices (always `0` for single-class detection)
+- **score**: List of confidence scores (0.0 to 1.0)
+
+### Schema
+
+```python
+{{
+    "objects": {{
+        "bbox": [[x, y, w, h], ...],   # List of bounding boxes
+        "category": [0, 0, ...],       # All same class
+        "score": [0.95, 0.87, ...]     # Confidence scores
+    }}
+}}
+```
+
+## Usage
+
+```python
+from datasets import load_dataset
+
+# Load the dataset
+dataset = load_dataset("{{output_dataset_id}}", split="{split}")
+
+# Access detections for an image
+example = dataset[0]
+detections = example["objects"]
+
+# Iterate through all detected objects in this image
+for bbox, category, score in zip(
+    detections["bbox"],
+    detections["category"],
+    detections["score"]
+):
+    x, y, w, h = bbox
+    print(f"Detected {class_name} at ({{x}}, {{y}}) with confidence {{score:.2f}}")
+
+# Filter high-confidence detections
+high_conf_examples = [
+    ex for ex in dataset
+    if any(score > 0.8 for score in ex["objects"]["score"])
+]
+
+# Count total detections across dataset
+total = sum(len(ex["objects"]["bbox"]) for ex in dataset)
+print(f"Total detections: {{total}}")
+```
+
+## Visualization
+
+To visualize the detections, you can use the visualization script from the same repository:
+
+```bash
+# Visualize first sample with detections
+uv run https://huggingface.co/datasets/uv-scripts/sam3/raw/main/visualize-detections.py \\
+    {{output_dataset_id}} \\
+    --first-with-detections
+
+# Visualize random samples
+uv run https://huggingface.co/datasets/uv-scripts/sam3/raw/main/visualize-detections.py \\
+    {{output_dataset_id}} \\
+    --num-samples 5
+
+# Save visualizations to files
+uv run https://huggingface.co/datasets/uv-scripts/sam3/raw/main/visualize-detections.py \\
+    {{output_dataset_id}} \\
+    --num-samples 3 \\
+    --output-dir ./visualizations
+```
+
+## Reproduction
+
+This dataset was generated using the [uv-scripts/sam3](https://huggingface.co/datasets/uv-scripts/sam3) object detection script:
+
+```bash
+uv run https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\
+    {source_dataset} \\
+    <output-dataset> \\
+    --class-name {class_name} \\
+    --confidence-threshold {confidence_threshold} \\
+    --mask-threshold {mask_threshold} \\
+    --batch-size {batch_size} \\
+    --dtype {dtype}
+```
+
+### Running on HuggingFace Jobs (GPU)
+
+This script requires a GPU. To run on HuggingFace infrastructure:
+
+```bash
+hf jobs uv run --flavor a100-large \\
+    -s HF_TOKEN=$HF_TOKEN \\
+    https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\
+    {source_dataset} \\
+    <output-dataset> \\
+    --class-name {class_name} \\
+    --confidence-threshold {confidence_threshold}
+```
+
+## Performance
+
+- **Processing Speed**: ~{images_per_second:.1f} images/second
+- **GPU Configuration**: CUDA with {dtype} precision
+
+---
+
+Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
+"""
+
+
 def load_and_validate_dataset(
     dataset_id: str,
     split: str,
@@ -225,9 +411,7 @@ def process_batch(
         for img in images:
             if isinstance(img, str):
                 img = Image.open(img)
-            if img.mode == "L":
-                img = img.convert("RGB")
-            elif img.mode != "RGB":
+            if img.mode != "RGB":
                 img = img.convert("RGB")
             pil_images.append(img)
 
@@ -356,6 +540,7 @@ def main():
 
     # Process dataset with explicit output features
     logger.info("🔍 Processing images...")
+    start_time = time.time()
    processed_dataset = dataset.map(
         lambda batch: process_batch(
             batch,
@@ -371,6 +556,9 @@ def main():
         features=new_features,
         desc="Detecting objects",
     )
+    end_time = time.time()
+    processing_time_seconds = end_time - start_time
+    processing_time_str = f"{processing_time_seconds / 60:.1f} minutes"
 
     # Calculate statistics
     total_detections = sum(len(objs) for objs in processed_dataset["objects"])
@@ -399,6 +587,28 @@ def main():
         logger.info("✅ Saved to ./output_dataset")
         sys.exit(1)
 
+    # Create and push dataset card
+    logger.info("📝 Creating dataset card...")
+    card_content = create_dataset_card(
+        source_dataset=args.input_dataset,
+        model=args.model,
+        class_name=class_name,
+        num_samples=len(processed_dataset),
+        total_detections=total_detections,
+        images_with_detections=images_with_detections,
+        processing_time=processing_time_str,
+        confidence_threshold=args.confidence_threshold,
+        mask_threshold=args.mask_threshold,
+        batch_size=args.batch_size,
+        dtype=args.dtype,
+        image_column=args.image_column,
+        split=args.split,
+    )
+
+    card = DatasetCard(card_content)
+    card.push_to_hub(args.output_dataset, token=args.hf_token or os.getenv("HF_TOKEN"))
+    logger.info("✅ Dataset card created and pushed!")
+
 
 if __name__ == "__main__":
     main()
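
Note on the multi-class workflow mentioned in the docstring: each run writes a single-class `objects` column, so per-class outputs have to be merged by row. A minimal sketch of such a merge, not part of this commit (the dataset IDs are hypothetical; it assumes both runs processed the same rows in the same order, using the dict-of-lists schema from the card above):

# merge sketch -- hypothetical dataset IDs, same row order assumed
from datasets import load_dataset

tables = load_dataset("user/report-tables", split="train")       # from --class-name table
photos = load_dataset("user/report-photographs", split="train")  # from --class-name photograph

def merge_objects(row, idx):
    """Concatenate the two single-class detections, re-indexing categories."""
    a, b = row["objects"], photos[idx]["objects"]
    return {
        "objects": {
            "bbox": a["bbox"] + b["bbox"],
            # 0 = table, 1 = photograph; the single-class ClassLabel feature
            # may need casting to plain ints (or a two-name ClassLabel) first.
            "category": [0] * len(a["bbox"]) + [1] * len(b["bbox"]),
            "score": a["score"] + b["score"],
        }
    }

merged = tables.map(merge_objects, with_indices=True)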
visualize-detections.py ADDED
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+# /// script
+# requires-python = ">=3.10"
+# dependencies = [
+#     "datasets",
+#     "matplotlib",
+#     "pillow",
+# ]
+# ///
+
+"""
+Visualize object detection predictions from a HuggingFace dataset.
+
+This script loads a dataset with object detection predictions and visualizes
+the bounding boxes on sample images.
+
+Examples:
+    # Visualize the first sample with detections
+    uv run visualize-detections.py my-username/detected-objects --first-with-detections
+
+    # Visualize a specific sample
+    uv run visualize-detections.py my-username/detected-objects --index 0
+
+    # Visualize multiple random samples
+    uv run visualize-detections.py my-username/detected-objects --num-samples 5
+
+    # Save visualizations to files instead of displaying
+    uv run visualize-detections.py my-username/detected-objects --num-samples 3 --output-dir ./visualizations
+
+    # Visualize specific split
+    uv run visualize-detections.py my-username/detected-objects --split train --num-samples 5
+"""
+
+import argparse
+import random
+from pathlib import Path
+
+import matplotlib.patches as patches
+import matplotlib.pyplot as plt
+from datasets import load_dataset
+
+
+def parse_args():
+    """Parse command line arguments."""
+    parser = argparse.ArgumentParser(
+        description="Visualize object detection predictions",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog=__doc__,
+    )
+
+    parser.add_argument(
+        "dataset_id", help="HuggingFace dataset ID (e.g., 'username/dataset')"
+    )
+    parser.add_argument(
+        "--index",
+        type=int,
+        default=None,
+        help="Index of sample to visualize (default: random)",
+    )
+    parser.add_argument(
+        "--num-samples",
+        type=int,
+        default=1,
+        help="Number of samples to visualize (default: 1)",
+    )
+    parser.add_argument(
+        "--first-with-detections",
+        action="store_true",
+        help="Find and visualize the first sample with detections",
+    )
+    parser.add_argument(
+        "--split", default="train", help="Dataset split to use (default: 'train')"
+    )
+    parser.add_argument(
+        "--image-column",
+        default="image",
+        help="Name of the image column (default: 'image')",
+    )
+    parser.add_argument(
+        "--objects-column",
+        default="objects",
+        help="Name of the objects column (default: 'objects')",
+    )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        default=None,
+        help="Directory to save visualizations (default: show interactively)",
+    )
+    parser.add_argument(
+        "--figsize-width",
+        type=int,
+        default=15,
+        help="Figure width in inches (default: 15)",
+    )
+    parser.add_argument(
+        "--figsize-height",
+        type=int,
+        default=20,
+        help="Figure height in inches (default: 20)",
+    )
+    parser.add_argument(
+        "--bbox-color",
+        default="red",
+        help="Color for bounding boxes (default: 'red')",
+    )
+    parser.add_argument(
+        "--show-scores",
+        action=argparse.BooleanOptionalAction,
+        default=True,
+        help="Show confidence scores on bounding boxes (disable with --no-show-scores)",
+    )
+
+    return parser.parse_args()
+
+
+def visualize_sample(
+    sample,
+    image_column="image",
+    objects_column="objects",
+    figsize=(15, 20),
+    bbox_color="red",
+    show_scores=True,
+    title=None,
+):
+    """Visualize a single sample with bounding boxes."""
+    image = sample[image_column]
+    objects = sample[objects_column]
+
+    fig, ax = plt.subplots(1, figsize=figsize)
+    ax.imshow(image, cmap="gray" if image.mode == "L" else None)
+
+    # Draw bounding boxes
+    num_detections = len(objects["bbox"])
+    for i in range(num_detections):
+        bbox = objects["bbox"][i]
+        score = objects["score"][i]
+        category = objects["category"][i]
+
+        x, y, w, h = bbox
+        rect = patches.Rectangle(
+            (x, y), w, h, linewidth=2, edgecolor=bbox_color, facecolor="none"
+        )
+        ax.add_patch(rect)
+
+        if show_scores:
+            label = f"{score:.2f}"
+            ax.text(
+                x,
+                y - 5,
+                label,
+                color=bbox_color,
+                fontsize=10,
+                bbox=dict(facecolor="white", alpha=0.7),
+            )
+
+    # Set title
+    if title:
+        ax.set_title(title, fontsize=14, pad=20)
+    else:
+        ax.set_title(f"Detections: {num_detections}", fontsize=14, pad=20)
+
+    ax.axis("off")
+    plt.tight_layout()
+
+    return fig, ax
+
+
+def main():
+    args = parse_args()
+
+    # Load dataset
+    print(f"📂 Loading dataset: {args.dataset_id} (split: {args.split})")
+    dataset = load_dataset(args.dataset_id, split=args.split)
+    print(f"✅ Loaded {len(dataset)} samples")
+
+    # Determine indices to visualize
+    if args.index is not None:
+        indices = [args.index]
+    elif args.first_with_detections:
+        # Find first sample with detections
+        print("🔍 Finding first sample with detections...")
+        first_idx = None
+        for idx in range(len(dataset)):
+            sample = dataset[idx]
+            if len(sample[args.objects_column]["bbox"]) > 0:
+                first_idx = idx
+                break
+
+        if first_idx is None:
+            print("❌ No samples with detections found in dataset")
+            return
+
+        print(f"✅ Found first sample with detections at index {first_idx}")
+        indices = [first_idx]
+    else:
+        # Select random samples
+        indices = random.sample(
+            range(len(dataset)), min(args.num_samples, len(dataset))
+        )
+
+    # Create output directory if saving
+    if args.output_dir:
+        output_path = Path(args.output_dir)
+        output_path.mkdir(parents=True, exist_ok=True)
+        print(f"💾 Saving visualizations to: {output_path}")
+
+    # Visualize samples
+    figsize = (args.figsize_width, args.figsize_height)
+
+    for idx in indices:
+        sample = dataset[idx]
+        num_detections = len(sample[args.objects_column]["bbox"])
+
+        print(f"\n🖼️ Sample {idx}: {num_detections} detections")
+
+        # Create visualization
+        title = f"Sample {idx} - {num_detections} detections"
+        fig, ax = visualize_sample(
+            sample,
+            image_column=args.image_column,
+            objects_column=args.objects_column,
+            figsize=figsize,
+            bbox_color=args.bbox_color,
+            show_scores=args.show_scores,
+            title=title,
+        )
+
+        # Save or show
+        if args.output_dir:
+            output_file = output_path / f"sample_{idx}.png"
+            plt.savefig(output_file, dpi=150, bbox_inches="tight")
+            print(f"   Saved: {output_file}")
+            plt.close(fig)
+        else:
+            plt.show()
+
+    if args.output_dir:
+        print(f"\n✅ Saved {len(indices)} visualizations to {args.output_dir}")
+
+
+if __name__ == "__main__":
+    main()
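
Since `visualize_sample` returns the Matplotlib figure and axes, it can also be reused outside the CLI, e.g. from a notebook. A minimal sketch (the dataset ID is a placeholder, not from this commit):

# notebook-style usage sketch -- dataset ID is a placeholder
from datasets import load_dataset

dataset = load_dataset("my-username/detected-objects", split="train")
fig, ax = visualize_sample(
    dataset[0],
    bbox_color="lime",   # any Matplotlib color name works
    show_scores=True,
)
fig.savefig("sample_0.png", dpi=150, bbox_inches="tight")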