Update README.md

README.md CHANGED
```diff
@@ -7,7 +7,6 @@ size_categories:
 - 10K<n<100K
 task_categories:
 - object-detection
-- visual-question-answering
 tags:
 - computer-vision
 - diffusion-priors
```
# Hidden-Objects

Image-object pairs with localized bounding boxes for learning realistic object placement in background scenes.

- **Project page:** https://hidden-objects.github.io/
- **Backgrounds:** [Places365](http://places2.csail.mit.edu/download.html)

## Schema

Each entry consists of a foreground object (`fg_class`) to be inserted within a background image (`bg_path`).
|
| 33 |
|:---|:---|:---|
|
| 34 |
+
| `entry_id` | int64 | Unique row identifier |
|
| 35 |
+
| `bg_path` | string | Relative path to background image in Places365 |
|
| 36 |
+
| `fg_class` | string | Foreground object category (e.g. `"bottle"`) |
|
| 37 |
+
| `bbox` | list | Normalized bounding box `[x, y, w, h]` in range 0–1 |
|
| 38 |
+
| `label` | int64 | 1 = positive, 0 = negative |
|
| 39 |
+
| `image_reward_score` | float64 | [ImageReward](https://github.com/THUDM/ImageReward) quality score |
|
| 40 |
+
| `confidence` | float64 | GroundingDINO detection confidence |
|
| 41 |
+
| `source` | string | Origin tag of the annotation |
|
Sample:

```json
{
  ...
  "label": 1,
  "image_reward_score": -1.542461,
  "confidence": 0.388181,
  "source": "ho"
}
```
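Since the annotations are lightweight, rows can be filtered on these fields before any image I/O. A minimal sketch (the 0.3 cutoff is illustrative, not part of the dataset):

```python
from datasets import load_dataset

ds = load_dataset("marco-schouten/hidden-objects", split="train")
# Keep positive pairs whose GroundingDINO detection is reasonably confident (arbitrary cutoff).
positives = ds.filter(lambda r: r["label"] == 1 and r["confidence"] > 0.3)
print(f"{len(positives)} / {len(ds)} rows kept")
```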
## Bounding Boxes

Bounding boxes are relative to a **512×512 center crop** of the background image, obtained as follows:

1. Resize the shortest side of the original image to **512px**.
2. Perform a **center crop** to reach 512×512.
3. The upper-left corner of the crop is `(0, 0)`.

Coordinate conversion:

```python
# Normalized [x, y, w, h] → pixel coordinates in the 512×512 crop
x, y, w, h = [v * 512 for v in bbox]
```
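To sanity-check alignment, the crop can be reproduced with torchvision and the box drawn on top. Here `item` stands for any dataset row (see Usage below), and the Places365 root and output path are hypothetical:

```python
import os
from PIL import Image, ImageDraw
import torchvision.transforms as T

crop = T.Compose([T.Resize(512), T.CenterCrop(512)])  # same preprocessing as the steps above
img = crop(Image.open(os.path.join("./data/places365", item["bg_path"])).convert("RGB"))

x, y, w, h = [v * 512 for v in item["bbox"]]
ImageDraw.Draw(img).rectangle([x, y, x + w, y + h], outline="red", width=3)
img.save("bbox_check.png")
```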
## Usage

### Quick start

```python
from datasets import load_dataset

dataset = load_dataset("marco-schouten/hidden-objects")
print(dataset["train"][0])
```
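The annotations can also be streamed, avoiding a full download:

```python
from datasets import load_dataset

# Stream rows instead of materializing the whole split on disk.
dataset = load_dataset("marco-schouten/hidden-objects", streaming=True)
first_row = next(iter(dataset["train"]))
print(first_row)
```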
### PyTorch Dataset

Requires the Places365 backgrounds downloaded locally:

```bash
huggingface-cli login
```

```python
import torchvision.datasets as datasets

# Download the full-resolution train-standard split of Places365.
background_images = datasets.Places365(root="./data/places365", split="train-standard", small=False, download=True)
```
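A quick sanity check on the download (each entry is an image/scene-label pair):

```python
img, target = background_images[0]
print(img.size, target)  # PIL image size and Places365 scene-class index
```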
|
| 89 |
+
|
| 90 |
+
```python
|
| 91 |
import os
|
| 92 |
import torch
|
| 93 |
from PIL import Image
|
|
|
|
| 97 |
|
| 98 |
class HiddenObjectsDataset(Dataset):
|
| 99 |
def __init__(self, places_root, split="train"):
|
| 100 |
+
self.data = load_dataset("marco-schouten/hidden-objects", split=split)
|
| 101 |
self.places_root = places_root
|
| 102 |
+
self.transform = T.Compose([T.Resize(512), T.CenterCrop(512), T.ToTensor()])
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
|
| 104 |
def __len__(self):
|
| 105 |
+
return len(self.data)
|
| 106 |
|
| 107 |
def __getitem__(self, idx):
|
| 108 |
+
item = self.data[idx]
|
| 109 |
+
image = self.transform(Image.open(os.path.join(self.places_root, item["bg_path"])).convert("RGB"))
|
|
|
|
|
|
|
| 110 |
return {
|
| 111 |
+
"entry_id": item["entry_id"],
|
| 112 |
+
"image": image,
|
| 113 |
+
"bbox": torch.tensor(item["bbox"]) * 512,
|
| 114 |
+
"label": item["label"],
|
| 115 |
+
"class": item["fg_class"],
|
| 116 |
+
"image_reward_score": item["image_reward_score"],
|
| 117 |
+
"confidence": item["confidence"],
|
| 118 |
}
|
| 119 |
|
| 120 |
+
# Usage
|
| 121 |
+
hidden_object_dataset = HiddenObjectsDataset(places_root="./data/places365")
|
| 122 |
+
```
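Batching then works with a standard `DataLoader`; a minimal sketch (batch size and worker count are arbitrary):

```python
from torch.utils.data import DataLoader

loader = DataLoader(hidden_object_dataset, batch_size=32, shuffle=True, num_workers=4)
batch = next(iter(loader))
print(batch["image"].shape, batch["bbox"].shape)  # [32, 3, 512, 512], [32, 4]
```

### Streaming loader

For training without materializing the annotations, the streaming split can be wrapped in a `DataLoader` with a collate function that skips rows whose background image is missing locally:

```python
import os

import torch
import torchvision.transforms as T
from datasets import load_dataset
from PIL import Image
from torch.utils.data import DataLoader


def get_streaming_loader(places_root, batch_size=32):
    dataset = load_dataset("marco-schouten/hidden-objects", split="train", streaming=True)
    preprocess = T.Compose([T.Resize(512), T.CenterCrop(512), T.ToTensor()])

    def collate_fn(batch):
        images, bboxes, ids = [], [], []
        for item in batch:
            path = os.path.join(places_root, item["bg_path"])
            try:
                img = Image.open(path).convert("RGB")
                images.append(preprocess(img))
                bboxes.append(torch.tensor(item["bbox"]) * 512)
                ids.append(item["entry_id"])
            except FileNotFoundError:
                # Skip rows whose background image is not present locally.
                continue
        return {
            "entry_id": ids,
            "pixel_values": torch.stack(images),
            "bboxes": torch.stack(bboxes),
        }

    return DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn)
```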