shunk031 committed on
Commit
3ce159a
1 Parent(s): a058bbe

Initialize (#1)


* add files

* update files

* update

* update COCOA.py

* update

* update

* update

* add `push_to_hub.yaml`

* update

* update

* add README.md

* update

* update

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,49 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    paths-ignore:
      - 'README.md'

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.9', '3.10']

    steps:
      - uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          pip install -U pip setuptools wheel poetry
          poetry install

      - name: Format
        run: |
          poetry run black --check .

      - name: Lint
        run: |
          poetry run ruff .

      - name: Type check
        run: |
          poetry run mypy . \
            --ignore-missing-imports \
            --no-strict-optional \
            --no-site-packages \
            --cache-dir=/dev/null

      # - name: Run tests
      #   run: |
      #     poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
name: Sync to Hugging Face Hub

on:
  workflow_run:
    workflows:
      - CI
    branches:
      - main
    types:
      - completed

jobs:
  push_to_hub:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Push to Huggingface hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: |
          git fetch --unshallow
          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/COCOA main
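
The workflow above mirrors the repository to the Hub with a plain `git push --force` over HTTPS, authenticated via the `HF_TOKEN` secret. For reference, roughly the same sync can be scripted with the `huggingface_hub` client; this is a hedged sketch of an alternative, not what the workflow runs:

```python
from huggingface_hub import HfApi

# token with write access, e.g. the same value stored in the HF_TOKEN secret
api = HfApi(token="hf_...")
api.upload_folder(
    folder_path=".",            # repository root
    repo_id="shunk031/COCOA",   # mirrors the push URL in the workflow
    repo_type="dataset",
    commit_message="Sync from GitHub",
)
```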
.gitignore ADDED
@@ -0,0 +1,180 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

*.tar.gz
*.jpg
*.png

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
COCOA.py ADDED
@@ -0,0 +1,522 @@
import json
import logging
import os
from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union

import datasets as ds
import numpy as np
from PIL import Image
from PIL.Image import Image as PilImage
from pycocotools import mask as cocomask
from tqdm.auto import tqdm

logger = logging.getLogger(__name__)
JsonDict = Dict[str, Any]

ImageId = int
AnnotationId = int
LicenseId = int


_CITATION = """\
@inproceedings{zhu2017semantic,
  title={Semantic amodal segmentation},
  author={Zhu, Yan and Tian, Yuandong and Metaxas, Dimitris and Doll{\'a}r, Piotr},
  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
  pages={1464--1472},
  year={2017}
}
@inproceedings{lin2014microsoft,
  title={Microsoft coco: Common objects in context},
  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
  booktitle={Computer Vision--ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13},
  pages={740--755},
  year={2014},
  organization={Springer}
}
@article{arbelaez2010contour,
  title={Contour detection and hierarchical image segmentation},
  author={Arbelaez, Pablo and Maire, Michael and Fowlkes, Charless and Malik, Jitendra},
  journal={IEEE transactions on pattern analysis and machine intelligence},
  volume={33},
  number={5},
  pages={898--916},
  year={2010},
  publisher={IEEE}
}
"""

_DESCRIPTION = """\
The COCOA dataset targets amodal segmentation, which aims to recognize and segment objects beyond their visible parts. \
This dataset includes labels not only for the visible parts of objects, but also for their occluded parts hidden \
by other objects. This enables learning to understand the full shape and position of objects.
"""

_HOMEPAGE = "https://github.com/Wakeupbuddy/amodalAPI"

_LICENSE = """\
The annotations in the COCO dataset along with this website belong to the COCO Consortium and are licensed under a Creative Commons Attribution 4.0 License.
"""

_URLS = {
    "COCO": {
        "images": {
            "train": "http://images.cocodataset.org/zips/train2014.zip",
            "validation": "http://images.cocodataset.org/zips/val2014.zip",
            "test": "http://images.cocodataset.org/zips/test2014.zip",
        },
    },
    "BSDS": {
        "images": "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz",
    },
}


def _load_image(image_path: str) -> PilImage:
    return Image.open(image_path)


@dataclass
class ImageData(object):
    image_id: ImageId
    license_id: LicenseId
    file_name: str
    height: int
    width: int
    date_captured: str
    flickr_url: str

    @classmethod
    def get_date_captured(cls, json_dict: JsonDict) -> str:
        date_captured = json_dict.get("date_captured")
        if date_captured is None:
            date_captured = json_dict["data_captured"]  # typo?
        return date_captured

    @classmethod
    def get_license_id(cls, json_dict: JsonDict) -> int:
        license_id = json_dict["license"]
        if license_id == "?":
            # Since the test data in BSDS has a license id of `?`,
            # convert it to -100 instead.
            return -100
        else:
            return int(license_id)

    @classmethod
    def to_base_dict(cls, json_dict: JsonDict) -> JsonDict:
        return {
            "image_id": json_dict["id"],
            "file_name": json_dict["file_name"],
            "height": json_dict["height"],
            "width": json_dict["width"],
            "flickr_url": json_dict["flickr_url"],
            "license_id": cls.get_license_id(json_dict),
            "date_captured": cls.get_date_captured(json_dict),
        }

    @property
    def shape(self) -> Tuple[int, int]:
        return (self.height, self.width)


@dataclass
class CocoImageData(ImageData):
    coco_url: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "CocoImageData":
        return cls(
            **cls.to_base_dict(json_dict),
            coco_url=json_dict["coco_url"],
        )


@dataclass
class BsDsImageData(ImageData):
    bsds_url: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "BsDsImageData":
        return cls(
            **cls.to_base_dict(json_dict),
            bsds_url=json_dict["bsds_url"],
        )


@dataclass
class RegionAnnotationData(object):
    segmentation: np.ndarray
    name: str
    area: float
    is_stuff: bool
    occlude_rate: float
    order: int
    visible_mask: Optional[np.ndarray] = None
    invisible_mask: Optional[np.ndarray] = None

    @classmethod
    def rle_segmentation_to_binary_mask(
        cls, segmentation, height: int, width: int
    ) -> np.ndarray:
        if isinstance(segmentation, list):
            rles = cocomask.frPyObjects([segmentation], h=height, w=width)
            rle = cocomask.merge(rles)
        else:
            raise NotImplementedError

        return cocomask.decode(rle)

    @classmethod
    def rle_segmentation_to_mask(
        cls, segmentation, height: int, width: int
    ) -> np.ndarray:
        binary_mask = cls.rle_segmentation_to_binary_mask(
            segmentation=segmentation, height=height, width=width
        )
        return binary_mask * 255

    @classmethod
    def get_visible_binary_mask(cls, rle_visible_mask=None) -> Optional[np.ndarray]:
        if rle_visible_mask is None:
            return None
        return cocomask.decode(rle_visible_mask)

    @classmethod
    def get_invisible_binary_mask(cls, rle_invisible_mask=None) -> Optional[np.ndarray]:
        return cls.get_visible_binary_mask(rle_invisible_mask)

    @classmethod
    def get_visible_mask(cls, rle_visible_mask=None) -> Optional[np.ndarray]:
        visible_mask = cls.get_visible_binary_mask(rle_visible_mask=rle_visible_mask)
        return visible_mask * 255 if visible_mask is not None else None

    @classmethod
    def get_invisible_mask(cls, rle_invisible_mask=None) -> Optional[np.ndarray]:
        return cls.get_visible_mask(rle_invisible_mask)

    @classmethod
    def from_dict(
        cls, json_dict: JsonDict, image_data: ImageData
    ) -> "RegionAnnotationData":
        segmentation = json_dict["segmentation"]

        segmentation_mask = cls.rle_segmentation_to_mask(
            segmentation=segmentation,
            height=image_data.height,
            width=image_data.width,
        )
        visible_mask = cls.get_visible_mask(
            rle_visible_mask=json_dict.get("visible_mask")
        )
        invisible_mask = cls.get_invisible_mask(
            rle_invisible_mask=json_dict.get("invisible_mask")
        )
        return cls(
            segmentation=segmentation_mask,
            visible_mask=visible_mask,
            invisible_mask=invisible_mask,
            name=json_dict["name"],
            area=json_dict["area"],
            is_stuff=json_dict["isStuff"],
            occlude_rate=json_dict["occlude_rate"],
            order=json_dict["order"],
        )


@dataclass
class CocoaAnnotationData(object):
    author: str
    url: str
    regions: List[RegionAnnotationData]
    image_id: ImageId
    depth_constraint: str
    size: int

    @classmethod
    def from_dict(
        cls, json_dict: JsonDict, images: Dict[ImageId, ImageData]
    ) -> "CocoaAnnotationData":
        image_id = json_dict["image_id"]

        regions = [
            RegionAnnotationData.from_dict(
                json_dict=region_dict, image_data=images[image_id]
            )
            for region_dict in json_dict["regions"]
        ]

        return cls(
            author=json_dict["author"],
            url=json_dict["url"],
            regions=regions,
            image_id=image_id,
            depth_constraint=json_dict["depth_constraint"],
            size=json_dict["size"],
        )


def _load_images_data(
    image_dicts: List[JsonDict],
    dataset_name: Literal["COCO", "BSDS"],
    tqdm_desc: str = "Load images",
) -> Dict[ImageId, ImageData]:
    ImageDataClass: Union[Type[CocoImageData], Type[BsDsImageData]]

    if dataset_name == "COCO":
        ImageDataClass = CocoImageData
    elif dataset_name == "BSDS":
        ImageDataClass = BsDsImageData
    else:
        raise ValueError(f"Invalid dataset name: {dataset_name}")

    images: Dict[ImageId, Union[CocoImageData, BsDsImageData]] = {}
    for image_dict in tqdm(image_dicts, desc=tqdm_desc):
        image_data = ImageDataClass.from_dict(image_dict)
        images[image_data.image_id] = image_data
    return images  # type: ignore


def _load_cocoa_data(
    ann_dicts: List[JsonDict],
    images: Dict[ImageId, ImageData],
    tqdm_desc: str = "Load COCOA annotations",
):
    annotations = defaultdict(list)
    ann_dicts = sorted(ann_dicts, key=lambda d: d["image_id"])

    for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
        cocoa_data = CocoaAnnotationData.from_dict(ann_dict, images=images)
        annotations[cocoa_data.image_id].append(cocoa_data)

    return annotations


class CocoaDataset(ds.GeneratorBasedBuilder):
    VERSION = ds.Version("1.0.0")
    BUILDER_CONFIGS = [
        ds.BuilderConfig(name="COCO", version=VERSION),
        ds.BuilderConfig(name="BSDS", version=VERSION),
    ]

    def load_amodal_annotation(self, ann_json_path: str) -> JsonDict:
        logger.info(f"Load from {ann_json_path}")
        with open(ann_json_path, "r") as rf:
            ann_json = json.load(rf)
        return ann_json

    @property
    def manual_download_instructions(self) -> str:
        return (
            "To use COCOA, you need to download the annotations "
            "from the Google Drive link in the official repository "
            "(https://github.com/Wakeupbuddy/amodalAPI#setup). "
            "Downloading the annotations currently appears to be restricted, "
            "but the author grants access upon request."
        )

    def _info(self) -> ds.DatasetInfo:
        features_dict = {
            "image_id": ds.Value("int64"),
            "license_id": ds.Value("int32"),
            "file_name": ds.Value("string"),
            "height": ds.Value("int32"),
            "width": ds.Value("int32"),
            "date_captured": ds.Value("string"),
            "flickr_url": ds.Value("string"),
            "image": ds.Image(),
        }

        if self.config.name == "COCO":
            features_dict["coco_url"] = ds.Value("string")
        elif self.config.name == "BSDS":
            features_dict["bsds_url"] = ds.Value("string")
        else:
            raise ValueError(f"Invalid dataset name: {self.config.name}")

        features_dict["annotations"] = ds.Sequence(
            {
                "author": ds.Value("string"),
                "url": ds.Value("string"),
                "regions": ds.Sequence(
                    {
                        "segmentation": ds.Image(),
                        "name": ds.Value("string"),
                        "area": ds.Value("float32"),
                        "is_stuff": ds.Value("bool"),
                        "occlude_rate": ds.Value("float32"),
                        "order": ds.Value("int32"),
                        "visible_mask": ds.Image(),
                        "invisible_mask": ds.Image(),
                    }
                ),
                "image_id": ds.Value("int64"),
                "depth_constraint": ds.Value("string"),
                "size": ds.Value("int32"),
            }
        )
        features = ds.Features(features_dict)

        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators_coco(self, ann_dir: str, image_dirs: Dict[str, str]):
        tng_ann_path = os.path.join(
            ann_dir,
            f"{self.config.name}_amodal_train2014.json",
        )
        val_ann_path = os.path.join(
            ann_dir,
            f"{self.config.name}_amodal_val2014.json",
        )
        tst_ann_path = os.path.join(
            ann_dir,
            f"{self.config.name}_amodal_test2014.json",
        )
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,  # type: ignore
                gen_kwargs={
                    "base_image_dir": image_dirs["train"],
                    "amodal_annotation_path": tng_ann_path,
                    "split": "train",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,  # type: ignore
                gen_kwargs={
                    "base_image_dir": image_dirs["validation"],
                    "amodal_annotation_path": val_ann_path,
                    "split": "val",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,  # type: ignore
                gen_kwargs={
                    "base_image_dir": image_dirs["test"],
                    "amodal_annotation_path": tst_ann_path,
                    "split": "test",
                },
            ),
        ]

    def _split_generators_bsds(self, ann_dir: str, image_dir: str):
        tng_ann_path = os.path.join(
            ann_dir,
            f"{self.config.name}_amodal_train.json",
        )
        val_ann_path = os.path.join(
            ann_dir,
            f"{self.config.name}_amodal_val.json",
        )
        tst_ann_path = os.path.join(
            ann_dir,
            f"{self.config.name}_amodal_test.json",
        )
        image_dir = os.path.join(image_dir, "BSR", "BSDS500", "data", "images")
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,  # type: ignore
                gen_kwargs={
                    "base_image_dir": os.path.join(image_dir, "train"),
                    "amodal_annotation_path": tng_ann_path,
                    "split": "train",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,  # type: ignore
                gen_kwargs={
                    "base_image_dir": os.path.join(image_dir, "val"),
                    "amodal_annotation_path": val_ann_path,
                    "split": "validation",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,  # type: ignore
                gen_kwargs={
                    "base_image_dir": os.path.join(image_dir, "test"),
                    "amodal_annotation_path": tst_ann_path,
                    "split": "test",
                },
            ),
        ]

    def _split_generators(self, dl_manager: ds.DownloadManager):
        file_paths = dl_manager.download_and_extract(_URLS[self.config.name])
        image_dirs = file_paths["images"]  # type: ignore

        assert dl_manager.manual_dir is not None, dl_manager.manual_dir
        data_path = os.path.expanduser(dl_manager.manual_dir)

        if not os.path.exists(data_path):
            raise FileNotFoundError(
                f"{data_path} does not exist. Make sure you pass a manual dir "
                'via `datasets.load_dataset("shunk031/COCOA", data_dir=...)` '
                "that contains the (possibly extracted) COCOA annotation tar.gz. "
                f"Manual download instructions: {self.manual_download_instructions}"
            )
        else:
            data_path = (
                dl_manager.extract(data_path)
                if not os.path.isdir(data_path)
                else data_path
            )

        assert isinstance(data_path, str)
        ann_dir = os.path.join(data_path, "annotations")

        if self.config.name == "COCO":
            return self._split_generators_coco(ann_dir=ann_dir, image_dirs=image_dirs)

        elif self.config.name == "BSDS":
            return self._split_generators_bsds(ann_dir=ann_dir, image_dir=image_dirs)

        else:
            raise ValueError(f"Invalid name: {self.config.name}")

    def _generate_examples(
        self,
        split: str,
        base_image_dir: str,
        amodal_annotation_path: str,
    ):
        if self.config.name == "COCO":
            image_dir = os.path.join(base_image_dir, f"{split}2014")
        elif self.config.name == "BSDS":
            image_dir = base_image_dir
        else:
            raise ValueError(f"Invalid task: {self.config.name}")

        ann_json = self.load_amodal_annotation(amodal_annotation_path)

        images = _load_images_data(
            image_dicts=ann_json["images"],
            dataset_name=self.config.name,
        )
        annotations = _load_cocoa_data(ann_dicts=ann_json["annotations"], images=images)

        for idx, image_id in enumerate(images.keys()):
            image_data = images[image_id]
            image_anns = annotations[image_id]

            if len(image_anns) < 1:
                continue

            image = _load_image(
                image_path=os.path.join(image_dir, image_data.file_name)
            )
            example = asdict(image_data)
            example["image"] = image
            example["annotations"] = []
            for ann in image_anns:
                ann_dict = asdict(ann)
                example["annotations"].append(ann_dict)

            yield idx, example
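
The mask handling in `RegionAnnotationData` leans on pycocotools' RLE utilities. A minimal standalone sketch of what `rle_segmentation_to_binary_mask` does, with a made-up polygon for illustration:

```python
from pycocotools import mask as cocomask


def polygon_to_mask(polygon, height, width):
    # A COCO-style polygon is a flat [x0, y0, x1, y1, ...] list; frPyObjects
    # converts it to RLE, merge collapses multiple RLEs, decode rasterizes.
    rles = cocomask.frPyObjects([polygon], height, width)
    rle = cocomask.merge(rles)
    return cocomask.decode(rle)  # uint8 array of shape (height, width)


# hypothetical triangle on a 10x10 canvas
mask = polygon_to_mask([1.0, 1.0, 8.0, 1.0, 4.0, 8.0], height=10, width=10)
print(mask.shape, int(mask.sum()))  # (10, 10) and the number of foreground pixels
```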
README.md ADDED
@@ -0,0 +1,496 @@
---
language:
- en
license: cc-by-4.0

tags:
- computer-vision
- object-detection
- ms-coco

datasets:
- stuff-thing
- stuff-only

metrics:
- accuracy
- iou
---

# Dataset Card for COCOA

[![CI](https://github.com/shunk031/huggingface-datasets_COCOA/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_COCOA/actions/workflows/ci.yaml)

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Dataset Preprocessing](#dataset-preprocessing)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- Homepage: https://github.com/Wakeupbuddy/amodalAPI
- Repository: https://github.com/shunk031/huggingface-datasets_COCOA
- Paper (preprint): https://arxiv.org/abs/1509.01329
- Paper (CVPR 2017): https://openaccess.thecvf.com/content_cvpr_2017/html/Zhu_Semantic_Amodal_Segmentation_CVPR_2017_paper.html

### Dataset Summary

The COCOA dataset targets amodal segmentation, which aims to recognize and segment objects beyond their visible parts. The dataset includes labels not only for the visible parts of objects, but also for their occluded parts hidden by other objects. This enables learning to understand the full shape and position of objects.

From the paper:

> We propose a detailed image annotation that captures information beyond the visible pixels and requires complex reasoning about full scene structure. Specifically, we create an amodal segmentation of each image: the full extent of each region is marked, not just the visible pixels. Annotators outline and name all salient regions in the image and specify a partial depth order. The result is a rich scene structure, including visible and occluded portions of each region, figure-ground edge information, semantic labels, and object overlap. We create two datasets for semantic amodal segmentation. First, we label 500 images in the BSDS dataset with multiple annotators per image, allowing us to study the statistics of human annotations. We show that the proposed full scene annotation is surprisingly consistent between annotators, including for regions and edges. Second, we annotate 5000 images from COCO. This larger dataset allows us to explore a number of algorithmic ideas for amodal segmentation and depth ordering.
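
Each region carries an amodal `segmentation` mask plus optional `visible_mask`/`invisible_mask` images and an `occlude_rate`. A minimal sketch of how these fields presumably fit together, assuming `occlude_rate` is the occluded fraction of the amodal region (the field names come from this dataset; the arithmetic is an illustration):

```python
import numpy as np


def occluded_fraction(amodal_mask: np.ndarray, invisible_mask: np.ndarray) -> float:
    # fraction of the amodal region hidden by other objects
    amodal = amodal_mask > 0
    invisible = invisible_mask > 0
    return float(invisible.sum()) / max(int(amodal.sum()), 1)
```

For a fully visible region, `visible_mask` and `invisible_mask` are absent (`None`) and the rate is `0.0`, which matches the instances shown below.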

### Dataset Preprocessing

### Supported Tasks and Leaderboards

### Languages

All annotations use English as the primary language.

## Dataset Structure

### Data Instances

To use COCOA, you need to download the annotations from [the Google Drive link](https://drive.google.com/open?id=0B8e3LNo7STslZURoTzhhMFpCelE) in the official repository (https://github.com/Wakeupbuddy/amodalAPI#setup). Downloading the annotations currently appears to be restricted, but the author grants access upon request.

When loading the dataset, users have to choose one of the configurations (`COCO` or `BSDS`) via `name` and pass the downloaded annotation archive via `data_dir`:

```python
import datasets as ds

dataset = ds.load_dataset("shunk031/COCOA", name="COCO", data_dir="/path/to/cocoa_annotation.tar.gz")
```
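
Once loaded, each example bundles the image with its per-annotator annotations. A short sketch of walking one example (a sketch only; the nested layout follows the instances shown below):

```python
example = dataset["train"][0]

print(example["file_name"], example["image"].size)
for author, regions in zip(
    example["annotations"]["author"], example["annotations"]["regions"]
):
    # each annotator contributes an ordered list of regions, front to back
    for name, occlude_rate in zip(regions["name"], regions["occlude_rate"]):
        print(f"{author}: {name} (occluded: {occlude_rate:.1%})")
```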

#### COCO

An example looks as follows.

```json
{
    "image_id": 321,
    "license_id": 1,
    "file_name": "COCO_train2014_000000000321.jpg",
    "height": 480,
    "width": 640,
    "date_captured": "2013-11-20 12:36:25",
    "flickr_url": "http://farm5.staticflickr.com/4096/4750559893_49fb0baf7f_z.jpg",
    "image": <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7FD21970F5E0>,
    "coco_url": "http://mscoco.org/images/321",
    "annotations": {
        "author": ["ash2"],
        "url": ["https://s3-us-west-1.amazonaws.com/coco-ann/coco-train/COCO_train2014_000000000321.jpg"],
        "regions": [
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970FBE0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970F8E0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970F400>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970F790>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970FCA0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970FF40>
                ],
                "name": ["sandwich", "container", "hot dog", "hot dog", "container", "table"],
                "area": [63328.0, 141246.0, 31232.0, 28735.0, 265844.0, 307200.0],
                "is_stuff": [False, False, False, False, False, True],
                "occlude_rate": [0.0, 0.44835251569747925, 0.0, 0.022307291626930237, 0.7122523188591003, 0.9019140601158142],
                "order": [1, 2, 3, 4, 5, 6],
                "visible_mask": [
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970FD90>,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970FB50>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD21970FE80>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD219479460>
                ],
                "invisible_mask": [
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD219479160>,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD2194793A0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD219479490>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FD219479130>
                ]
            }
        ],
        "image_id": [321],
        "depth_constraint": ["1-2,1-5,1-6,2-5,2-6,3-4,3-5,3-6,4-5,4-6,5-6"],
        "size": [6]
    }
}
```

#### BSDS

An example looks as follows.

```json
{
    "image_id": 100075,
    "license_id": -100,
    "file_name": "100075.jpg",
    "height": 321,
    "width": 481,
    "date_captured": "?",
    "flickr_url": "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
    "image": <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=481x321 at 0x7FD22A328CA0>,
    "bsds_url": "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
    "annotations": {
        "author": ["acherian", "amorgan", "dromero", "jdayal", "kjyou", "ttouneh"],
        "url": [
            "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
            "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
            "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
            "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
            "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg",
            "https://s3-us-west-1.amazonaws.com/coco-ann/BSDS/BSDS_train_100075.jpg"
        ],
        "regions": [
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3288E0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328430>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328070>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328610>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3280D0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328BE0>
                ],
                "name": ["rocks", "bear", "bear", "bear", "sand", "water"],
                "area": [31872.0, 5603.0, 38819.0, 12869.0, 27883.0, 124695.0],
                "is_stuff": [False, False, False, False, False, False],
                "occlude_rate": [0.0, 0.0, 0.0, 0.3645193874835968, 0.13043789565563202, 0.6487349271774292],
                "order": [1, 2, 3, 4, 5, 6],
                "visible_mask": [
                    None,
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328AF0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328A30>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328220>
                ],
                "invisible_mask": [
                    None,
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3282E0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328400>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328310>
                ]
            },
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328340>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328B80>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328670>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328520>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328460>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328D00>
                ],
                "name": ["bear", "bear", "bear", "shore line", "water", "shore line"],
                "area": [38772.0, 5178.0, 13575.0, 31977.0, 84224.0, 37418.0],
                "is_stuff": [False, False, False, False, False, False],
                "occlude_rate": [0.0, 0.0, 0.35889503359794617, 0.1458861082792282, 0.5715591907501221, 0.0],
                "order": [1, 2, 3, 4, 5, 6],
                "visible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328A00>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328D60>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3285E0>,
                    None
                ],
                "invisible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3286A0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328490>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328100>,
                    None
                ]
            },
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3282B0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328EE0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3284C0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A3285B0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328C40>
                ],
                "name": ["bear", "bear", "bear", "beach", "ocean"],
                "area": [38522.0, 5496.0, 12581.0, 27216.0, 126090.0],
                "is_stuff": [False, False, False, False, False],
                "occlude_rate": [0.0, 0.0, 0.3449646234512329, 0.11258083581924438, 0.39141881465911865],
                "order": [1, 2, 3, 4, 5],
                "visible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328940>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD22A328880>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830A00>
                ],
                "invisible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830CD0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830BB0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830940>
                ]
            },
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830910>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2198308E0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830C70>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830970>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830CA0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2198309A0>
                ],
                "name": ["Bear", "Bear", "Bear", "Water", "ground", "Ground"],
                "area": [39133.0, 7120.0, 13053.0, 97052.0, 33441.0, 26313.0],
                "is_stuff": [False, False, False, False, False, False],
                "occlude_rate": [0.0, 0.0, 0.4422737956047058, 0.5332708358764648, 0.007117012050002813, 0.1584388017654419],
                "order": [1, 2, 3, 4, 5, 6],
                "visible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830A30>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830C40>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219830B80>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6820>
                ],
                "invisible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A68B0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6610>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A69D0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6730>
                ]
            },
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6790>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6550>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6850>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6940>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A66D0>
                ],
                "name": ["bear", "bear", "bear", "water", "rock beach"],
                "area": [38378.0, 6130.0, 12649.0, 98377.0, 153118.0],
                "is_stuff": [False, False, False, False, False],
                "occlude_rate": [0.0, 0.0, 0.41094157099723816, 0.5013265013694763, 0.65973299741745],
                "order": [1, 2, 3, 4, 5],
                "visible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD268700F10>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2687004F0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2687002B0>
                ],
                "invisible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A64C0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD28805FB50>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD28805F580>
                ]
            },
            {
                "segmentation": [
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2191A6880>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2480FB190>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2480FB8E0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2480FB070>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2480FB610>
                ],
                "name": ["bear", "bear", "bear", "sand", "water"],
                "area": [38802.0, 5926.0, 12248.0, 27857.0, 126748.0],
                "is_stuff": [False, False, False, False, False],
                "occlude_rate": [0.0, 0.0, 0.37026453018188477, 0.13170836865901947, 0.3872092664241791],
                "order": [1, 2, 3, 4, 5],
                "visible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219479DC0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219479C70>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219479A90>
                ],
                "invisible_mask": [
                    None,
                    None,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219479AF0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD2194795B0>,
                    <PIL.PngImagePlugin.PngImageFile image mode=L size=481x321 at 0x7FD219479670>
                ]
            }
        ],
        "image_id": [100075, 100075, 100075, 100075, 100075, 100075],
        "depth_constraint": [
            "1-6,2-4,2-5,2-6,3-4,3-5,3-6,4-5,4-6,5-6",
            "1-3,1-4,1-5,2-3,2-4,2-5,3-4,3-5,4-5",
            "1-3,1-4,1-5,2-3,2-4,2-5,3-4,3-5,4-5",
            "1-3,1-4,1-6,2-3,2-4,2-6,3-4,3-6,4-5,4-6",
            "1-4,1-5,2-3,2-4,2-5,3-4,3-5,4-5",
            "1-3,1-4,1-5,2-3,2-4,2-5,3-4,3-5,4-5"
        ],
        "size": [6, 6, 5, 6, 5, 5]
    }
}
```
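
The `depth_constraint` strings encode pairwise depth relations between region indices as comma-separated `a-b` pairs. Parsing them is trivial; note that reading a pair as "region `a` lies in front of region `b`" is an assumption consistent with the `order` fields above, not something this card states:

```python
def parse_depth_constraints(constraint: str):
    # "1-2,1-5" -> [(1, 2), (1, 5)]; each (a, b) presumably means
    # region a is in front of region b (assumption, see above)
    return [tuple(map(int, pair.split("-"))) for pair in constraint.split(",")]


print(parse_depth_constraints("1-6,2-4,2-5"))  # [(1, 6), (2, 4), (2, 5)]
```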

### Data Fields

#### COCO

- `image_id`: Unique numeric ID of the image.
- `license_id`: Unique numeric ID of the image license.
- `file_name`: File name of the image.
- `width`: Image width.
- `height`: Image height.
- `date_captured`: Date when the image was captured.
- `flickr_url`: Original Flickr URL of the image.
- `image`: A `PIL.Image.Image` object containing the image.
- `coco_url`: COCO URL of the image.
- `annotations`: Holds a list of `Annotation` data classes:
  - `author`: TBD
  - `url`: TBD
  - `image_id`: TBD
  - `depth_constraint`: TBD
  - `size`: TBD
  - `regions`: TBD
    - `segmentation`: TBD
    - `name`: TBD
    - `area`: TBD
    - `is_stuff`: TBD
    - `occlude_rate`: TBD
    - `order`: TBD
    - `visible_mask`: TBD
    - `invisible_mask`: TBD

#### BSDS

- `image_id`: Unique numeric ID of the image.
- `license_id`: Unique numeric ID of the image license.
- `file_name`: File name of the image.
- `width`: Image width.
- `height`: Image height.
- `date_captured`: Date when the image was captured.
- `flickr_url`: Original Flickr URL of the image.
- `image`: A `PIL.Image.Image` object containing the image.
- `bsds_url`: BSDS URL of the image.
- `annotations`: Holds a list of `Annotation` data classes:
  - `author`: TBD
  - `url`: TBD
  - `image_id`: TBD
  - `depth_constraint`: TBD
  - `size`: TBD
  - `regions`: TBD
    - `segmentation`: TBD
    - `name`: TBD
    - `area`: TBD
    - `is_stuff`: TBD
    - `occlude_rate`: TBD
    - `order`: TBD
    - `visible_mask`: TBD
    - `invisible_mask`: TBD

### Data Splits

| name | train | validation |  test |
|------|------:|-----------:|------:|
| COCO | 2,500 |      1,323 | 1,250 |
| BSDS |   200 |        100 |   200 |
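
After loading, the split sizes can be sanity-checked against this table (shown here for the `COCO` configuration, mirroring the expectations in `tests/COCOA_test.py` further below):

```python
for split, expected in [("train", 2500), ("validation", 1323), ("test", 1250)]:
    assert dataset[split].num_rows == expected, (split, dataset[split].num_rows)
```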
425
+
426
+ ## Dataset Creation
427
+
428
+ ### Curation Rationale
429
+
430
+ ### Source Data
431
+
432
+ #### Initial Data Collection and Normalization
433
+
434
+ #### Who are the source language producers?
435
+
436
+ ### Annotations
437
+
438
+ #### Annotation process
439
+
440
+ #### Who are the annotators?
441
+
442
+ ### Personal and Sensitive Information
443
+
444
+ ## Considerations for Using the Data
445
+
446
+ ### Social Impact of Dataset
447
+
448
+ ### Discussion of Biases
449
+
450
+ ### Other Known Limitations
451
+
452
+ ## Additional Information
453
+
454
+ ### Dataset Curators
455
+
456
+ ### Licensing Information
457
+
458
+ COCOA is a derivative work of the COCO dataset. The authors of COCO do not in any form endorse this work. Different licenses apply:
459
+ - COCO images: [Flickr Terms of use](http://cocodataset.org/#termsofuse)
460
+ - COCO annotations: [Creative Commons Attribution 4.0 License](http://cocodataset.org/#termsofuse)
461
+
462
+ ### Citation Information
463
+
464
+ ```bibtex
465
+ @inproceedings{zhu2017semantic,
466
+ title={Semantic amodal segmentation},
467
+ author={Zhu, Yan and Tian, Yuandong and Metaxas, Dimitris and Doll{\'a}r, Piotr},
468
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
469
+ pages={1464--1472},
470
+ year={2017}
471
+ }
472
+
473
+ @inproceedings{lin2014microsoft,
474
+ title={Microsoft coco: Common objects in context},
475
+ author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
476
+ booktitle={Computer Vision--ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13},
477
+ pages={740--755},
478
+ year={2014},
479
+ organization={Springer}
480
+ }
481
+
482
+ @article{arbelaez2010contour,
483
+ title={Contour detection and hierarchical image segmentation},
484
+ author={Arbelaez, Pablo and Maire, Michael and Fowlkes, Charless and Malik, Jitendra},
485
+ journal={IEEE transactions on pattern analysis and machine intelligence},
486
+ volume={33},
487
+ number={5},
488
+ pages={898--916},
489
+ year={2010},
490
+ publisher={IEEE}
491
+ }
492
+ ```
493
+
494
+ ### Contributions
495
+
496
+ Thanks to [@Wakeupbuddy](https://github.com/Wakeupbuddy) for publishing the COCOA dataset.
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,24 @@
[tool.poetry]
name = "huggingface-datasets-cocoa"
version = "0.1.0"
description = ""
authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.9"
datasets = { extras = ["vision"], version = "^2.14.4" }
pycocotools = "^2.0.7"

[tool.poetry.group.dev.dependencies]
ruff = "^0.0.287"
black = "^23.7.0"
mypy = "^1.5.1"
pytest = "^7.4.1"

[tool.ruff]
line-length = 170

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
tests/COCOA_test.py ADDED
@@ -0,0 +1,45 @@
import os

import datasets as ds
import pytest


@pytest.fixture
def dataset_path() -> str:
    return "COCOA.py"


@pytest.fixture
def data_dir() -> str:
    is_ci = bool(os.environ.get("CI", False))
    if is_ci:
        raise NotImplementedError
    else:
        return "annotations.tar.gz"


@pytest.mark.parametrize(
    argnames=(
        "dataset_name",
        "expected_num_train",
        "expected_num_validation",
        "expected_num_test",
    ),
    argvalues=(
        ("COCO", 2500, 1323, 1250),
        ("BSDS", 200, 100, 200),
    ),
)
def test_load_dataset(
    dataset_path: str,
    dataset_name: str,
    data_dir: str,
    expected_num_train: int,
    expected_num_validation: int,
    expected_num_test: int,
):
    dataset = ds.load_dataset(path=dataset_path, name=dataset_name, data_dir=data_dir)

    assert dataset["train"].num_rows == expected_num_train  # type: ignore
    assert dataset["validation"].num_rows == expected_num_validation  # type: ignore
    assert dataset["test"].num_rows == expected_num_test  # type: ignore
tests/__init__.py ADDED
File without changes