shunk031 committed
Commit db6eb0a
1 Parent(s): 8aaa598

add files (#1)

Files changed (8)
  1. .github/workflows/ci.yaml +55 -0
  2. .gitignore +166 -0
  3. README.md +276 -0
  4. cocostuff.py +368 -0
  5. poetry.lock +0 -0
  6. pyproject.toml +24 -0
  7. tests/__init__.py +0 -0
  8. tests/cocostuff_test.py +61 -0
.github/workflows/ci.yaml ADDED
@@ -0,0 +1,55 @@
+ name: CI
+
+ on:
+   push:
+     branches: [main]
+   pull_request:
+     branches: [main]
+
+ jobs:
+   build_and_release:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: ['3.8', '3.9', '3.10']
+
+     steps:
+       - uses: actions/checkout@v2
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v2
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       - name: Install dependencies
+         run: |
+           pip install -U pip setuptools wheel poetry
+           poetry install
+
+       - name: Format
+         run: |
+           poetry run black --check .
+
+       - name: Lint
+         run: |
+           poetry run flake8 . --ignore=E501,W503,E203
+
+       - name: Type check
+         run: |
+           poetry run mypy . \
+             --ignore-missing-imports \
+             --no-strict-optional \
+             --no-site-packages \
+             --cache-dir=/dev/null
+
+       - name: Run tests
+         run: |
+           poetry run pytest --color=yes -rf
+
+       - name: Push to Huggingface hub
+         if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+           HF_USERNAME: ${{ secrets.HF_USERNAME }}
+         run: |
+           git fetch --unshallow
+           git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/cocostuff main
.gitignore ADDED
@@ -0,0 +1,166 @@
+ # Created by https://www.toptal.com/developers/gitignore/api/python
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+ ### Python ###
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # End of https://www.toptal.com/developers/gitignore/api/python
README.md ADDED
@@ -0,0 +1,276 @@
+ ---
+ language:
+ - en
+ license: cc-by-4.0
+
+ tags:
+ - computer-vision
+ - object-detection
+ - ms-coco
+
+ datasets:
+ - stuff-thing
+ - stuff-only
+
+ metrics:
+ - accuracy
+ - iou
+ ---
+
+ # Dataset Card for COCO-Stuff
+
+ [![CI](https://github.com/shunk031/huggingface-datasets_cocostuff/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_cocostuff/actions/workflows/ci.yaml)
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Dataset Preprocessing](#dataset-preprocessing)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - Homepage: https://github.com/nightrome/cocostuff
+ - Repository: https://github.com/nightrome/cocostuff
+ - Paper (preprint): https://arxiv.org/abs/1612.03716
+ - Paper (CVPR2018): https://openaccess.thecvf.com/content_cvpr_2018/html/Caesar_COCO-Stuff_Thing_and_CVPR_2018_paper.html
+
+ ### Dataset Summary
+
+ COCO-Stuff is the largest existing dataset with dense stuff and thing annotations.
+
+ From the paper:
+
+ > Semantic classes can be either things (objects with a well-defined shape, e.g. car, person) or stuff (amorphous background regions, e.g. grass, sky). While lots of classification and detection works focus on thing classes, less attention has been given to stuff classes. Nonetheless, stuff classes are important as they allow to explain important aspects of an image, including (1) scene type; (2) which thing classes are likely to be present and their location (through contextual reasoning); (3) physical attributes, material types and geometric properties of the scene. To understand stuff and things in context we introduce COCO-Stuff, which augments all 164K images of the COCO 2017 dataset with pixel-wise annotations for 91 stuff classes. We introduce an efficient stuff annotation protocol based on superpixels, which leverages the original thing annotations. We quantify the speed versus quality trade-off of our protocol and explore the relation between annotation time and boundary complexity. Furthermore, we use COCO-Stuff to analyze: (a) the importance of stuff and thing classes in terms of their surface cover and how frequently they are mentioned in image captions; (b) the spatial relations between stuff and things, highlighting the rich contextual relations that make our dataset unique; (c) the performance of a modern semantic segmentation method on stuff and thing classes, and whether stuff is easier to segment than things.
+
+ ### Dataset Preprocessing
+
+ ### Supported Tasks and Leaderboards
+
+ ### Languages
+
+ All annotations use English as the primary language.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ To load a specific configuration, users have to pass its name (`stuff-thing` or `stuff-only`) as the second argument:
+
+ ```python
+ from datasets import load_dataset
+
+ load_dataset("shunk031/cocostuff", "stuff-thing")
+ ```
+
+ #### stuff-thing
+
+ An example looks as follows.
+
+ ```python
+ {
+     'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7FCA033C9C40>,
+     'image_filename': '000000000009.jpg',
+     'image_id': '9',
+     'width': 640,
+     'height': 480,
+     'objects': [
+         {
+             'object_id': '121',
+             'x': 0,
+             'y': 11,
+             'w': 640,
+             'h': 469,
+             'name': 'food-other'
+         },
+         {
+             'object_id': '143',
+             'x': 0,
+             'y': 0,
+             'w': 640,
+             'h': 480,
+             'name': 'plastic'
+         },
+         {
+             'object_id': '165',
+             'x': 0,
+             'y': 0,
+             'w': 319,
+             'h': 118,
+             'name': 'table'
+         },
+         {
+             'object_id': '183',
+             'x': 0,
+             'y': 2,
+             'w': 631,
+             'h': 472,
+             'name': 'unknown-183'
+         }
+     ],
+     'stuff_map': <PIL.PngImagePlugin.PngImageFile image mode=L size=640x480 at 0x7FCA0222D880>
+ }
+ ```
+
+ #### stuff-only
+
+ An example looks as follows.
+
+ ```python
+ {
+     'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7FCA033C9C40>,
+     'image_filename': '000000000009.jpg',
+     'image_id': '9',
+     'width': 640,
+     'height': 480,
+     'objects': [
+         {
+             'object_id': '121',
+             'x': 0,
+             'y': 11,
+             'w': 640,
+             'h': 469,
+             'name': 'food-other'
+         },
+         {
+             'object_id': '143',
+             'x': 0,
+             'y': 0,
+             'w': 640,
+             'h': 480,
+             'name': 'plastic'
+         },
+         {
+             'object_id': '165',
+             'x': 0,
+             'y': 0,
+             'w': 319,
+             'h': 118,
+             'name': 'table'
+         },
+         {
+             'object_id': '183',
+             'x': 0,
+             'y': 2,
+             'w': 631,
+             'h': 472,
+             'name': 'unknown-183'
+         }
+     ]
+ }
+ ```
+
+ ### Data Fields
+
+ #### stuff-thing
+
+ - `image`: A `PIL.Image.Image` object containing the image.
+ - `image_id`: Unique numeric ID of the image.
+ - `image_filename`: File name of the image.
+ - `width`: Image width.
+ - `height`: Image height.
+ - `stuff_map`: A `PIL.Image.Image` object containing the stuff + thing PNG-style annotations.
+ - `objects`: Holds a list of `Object` data classes:
+   - `object_id`: Unique numeric ID of the object.
+   - `x`: The x coordinate of the bounding box's top-left corner.
+   - `y`: The y coordinate of the bounding box's top-left corner.
+   - `w`: Bounding box width.
+   - `h`: Bounding box height.
+   - `name`: Object name.
+
+ #### stuff-only
+
+ - `image`: A `PIL.Image.Image` object containing the image.
+ - `image_id`: Unique numeric ID of the image.
+ - `image_filename`: File name of the image.
+ - `width`: Image width.
+ - `height`: Image height.
+ - `objects`: Holds a list of `Object` data classes:
+   - `object_id`: Unique numeric ID of the object.
+   - `x`: The x coordinate of the bounding box's top-left corner.
+   - `y`: The y coordinate of the bounding box's top-left corner.
+   - `w`: Bounding box width.
+   - `h`: Bounding box height.
+   - `name`: Object name.
+
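+ As a minimal sketch of how the fields above can be accessed (assuming the `stuff-thing` configuration has already been downloaded and that `numpy` is installed; the variable names are illustrative):
+
+ ```python
+ import numpy as np
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/cocostuff", "stuff-thing", split="validation")
+ example = dataset[0]
+
+ # `stuff_map` is a mode-"L" PNG whose pixel values encode class labels.
+ stuff_map = np.array(example["stuff_map"])
+ print(stuff_map.shape)  # (height, width)
+
+ # Each object carries a COCO-style (x, y, w, h) box and a class name.
+ for obj in example["objects"]:
+     print(obj["name"], (obj["x"], obj["y"], obj["w"], obj["h"]))
+ ```
+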
+ ### Data Splits
+
+ | name        |   train | validation |
+ |-------------|--------:|-----------:|
+ | stuff-thing | 118,280 |      5,000 |
+ | stuff-only  | 118,280 |      5,000 |
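+
+ The counts above can be checked programmatically once a configuration is built; the following sketch mirrors `tests/cocostuff_test.py` (note that it downloads the full dataset on first run):
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/cocostuff", "stuff-only")
+ assert dataset["train"].num_rows == 118280
+ assert dataset["validation"].num_rows == 5000
+ ```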
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ #### Who are the source language producers?
+
+ ### Annotations
+
+ #### Annotation process
+
+ #### Who are the annotators?
+
+ From the paper:
+
+ > COCO-Stuff contains 172 classes: 80 thing, 91 stuff, and 1 class unlabeled. The 80 thing classes are the same as in COCO [35]. The 91 stuff classes are curated by an expert annotator. The class unlabeled is used in two situations: if a label does not belong to any of the 171 predefined classes, or if the annotator cannot infer the label of a pixel.
+
+ ### Personal and Sensitive Information
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ ### Discussion of Biases
+
+ ### Other Known Limitations
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ ### Licensing Information
+
+ COCO-Stuff is a derivative work of the COCO dataset. The authors of COCO do not in any form endorse this work. Different licenses apply:
+ - COCO images: [Flickr Terms of use](http://cocodataset.org/#termsofuse)
+ - COCO annotations: [Creative Commons Attribution 4.0 License](http://cocodataset.org/#termsofuse)
+ - COCO-Stuff annotations & code: [Creative Commons Attribution 4.0 License](http://cocodataset.org/#termsofuse)
+
+ ### Citation Information
+
+ ```bibtex
+ @INPROCEEDINGS{caesar2018cvpr,
+   title={COCO-Stuff: Thing and stuff classes in context},
+   author={Caesar, Holger and Uijlings, Jasper and Ferrari, Vittorio},
+   booktitle={Computer vision and pattern recognition (CVPR), 2018 IEEE conference on},
+   organization={IEEE},
+   year={2018}
+ }
+ ```
+
+ ### Contributions
+
+ Thanks to [@nightrome](https://github.com/nightrome) for publishing the COCO-Stuff dataset.
cocostuff.py ADDED
@@ -0,0 +1,368 @@
+ import copy
+ import json
+ import logging
+ import os
+ from collections import defaultdict
+ from typing import Dict, TypedDict
+
+ import datasets as ds
+
+ logger = logging.getLogger(__name__)
+
+ _CITATION = """\
+ @INPROCEEDINGS{caesar2018cvpr,
+   title={COCO-Stuff: Thing and stuff classes in context},
+   author={Caesar, Holger and Uijlings, Jasper and Ferrari, Vittorio},
+   booktitle={Computer vision and pattern recognition (CVPR), 2018 IEEE conference on},
+   organization={IEEE},
+   year={2018}
+ }
+ """
+
+ _DESCRIPTION = """\
+ COCO-Stuff augments all 164K images of the popular COCO dataset with pixel-level stuff annotations. These annotations can be used for scene understanding tasks like semantic segmentation, object detection and image captioning.
+ """
+
+ _HOMEPAGE = "https://github.com/nightrome/cocostuff"
+
+ _LICENSE = """\
+ COCO-Stuff is a derivative work of the COCO dataset. The authors of COCO do not in any form endorse this work. Different licenses apply:
+ - COCO images: Flickr Terms of use
+ - COCO annotations: Creative Commons Attribution 4.0 License
+ - COCO-Stuff annotations & code: Creative Commons Attribution 4.0 License
+ """
+
+
+ class URLs(TypedDict):
+     train: str
+     val: str
+     stuffthingmaps_trainval: str
+     stuff_trainval: str
+     labels: str
+
+
+ _URLS: URLs = {
+     "train": "http://images.cocodataset.org/zips/train2017.zip",
+     "val": "http://images.cocodataset.org/zips/val2017.zip",
+     "stuffthingmaps_trainval": "http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip",
+     "stuff_trainval": "http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuff_trainval2017.zip",
+     "labels": "https://raw.githubusercontent.com/nightrome/cocostuff/master/labels.txt",
+ }
+
+
+ class GenerateExamplesArguments(TypedDict):
+     image_dirpath: str
+     stuff_dirpath: str
+     stuff_thing_maps_dirpath: str
+     labels_path: str
+     split: str
+
+
+ def _load_json(json_path: str):
+     logger.info(f"Load json from {json_path}")
+     with open(json_path, "r") as rf:
+         json_data = json.load(rf)
+     return json_data
+
+
+ def _load_labels(labels_path: str) -> Dict[int, str]:
+     # Each line of labels.txt has the form "<label_id>: <label_name>".
+     label_id_to_label_name: Dict[int, str] = {}
+
+     logger.info(f"Load labels from {labels_path}")
+     with open(labels_path, "r") as rf:
+         for line in rf:
+             label_id, label_name = line.strip().split(": ")
+             label_id_to_label_name[int(label_id)] = label_name
+     return label_id_to_label_name
+
+
+ class CocoStuffDataset(ds.GeneratorBasedBuilder):
+
+     VERSION = ds.Version("1.0.0")  # type: ignore
+
+     BUILDER_CONFIGS = [
+         ds.BuilderConfig(
+             name="stuff-thing",
+             version=VERSION,  # type: ignore
+             description="Stuff+thing PNG-style annotations on COCO 2017 trainval",
+         ),
+         ds.BuilderConfig(
+             name="stuff-only",
+             version=VERSION,  # type: ignore
+             description="Stuff-only COCO-style annotations on COCO 2017 trainval",
+         ),
+     ]
+
+     def _info(self) -> ds.DatasetInfo:
+         if self.config.name == "stuff-thing":
+             features = ds.Features(
+                 {
+                     "image": ds.Image(),
+                     "image_id": ds.Value("int32"),
+                     "image_filename": ds.Value("string"),
+                     "width": ds.Value("int32"),
+                     "height": ds.Value("int32"),
+                     "stuff_map": ds.Image(),
+                     "objects": [
+                         {
+                             "object_id": ds.Value("string"),
+                             "x": ds.Value("int32"),
+                             "y": ds.Value("int32"),
+                             "w": ds.Value("int32"),
+                             "h": ds.Value("int32"),
+                             "name": ds.Value("string"),
+                         }
+                     ],
+                 }
+             )
+         elif self.config.name == "stuff-only":
+             features = ds.Features(
+                 {
+                     "image": ds.Image(),
+                     "image_id": ds.Value("int32"),
+                     "image_filename": ds.Value("string"),
+                     "width": ds.Value("int32"),
+                     "height": ds.Value("int32"),
+                     "objects": [
+                         {
+                             "object_id": ds.Value("int32"),
+                             "x": ds.Value("int32"),
+                             "y": ds.Value("int32"),
+                             "w": ds.Value("int32"),
+                             "h": ds.Value("int32"),
+                             "name": ds.Value("string"),
+                         }
+                     ],
+                 }
+             )
+         else:
+             raise ValueError(f"Invalid dataset name: {self.config.name}")
+
+         return ds.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def load_stuff_json(self, stuff_dirpath: str, split: str):
+         return _load_json(
+             json_path=os.path.join(stuff_dirpath, f"stuff_{split}2017.json")
+         )
+
+     def get_image_id_to_image_infos(self, images):
+         # Index the COCO "images" entries by image ID, sorted for determinism.
+         image_id_to_image_infos = {}
+         for img_dict in images:
+             image_id = img_dict.pop("id")
+             image_id_to_image_infos[image_id] = img_dict
+
+         image_id_to_image_infos = dict(sorted(image_id_to_image_infos.items()))
+         return image_id_to_image_infos
+
+     def get_image_id_to_annotations(self, annotations):
+         # Group the COCO "annotations" entries by image ID, sorted for determinism.
+         image_id_to_annotations = defaultdict(list)
+         for ann_dict in annotations:
+             image_id = ann_dict.pop("image_id")
+             image_id_to_annotations[image_id].append(ann_dict)
+
+         image_id_to_annotations = dict(sorted(image_id_to_annotations.items()))
+         return image_id_to_annotations
+
+     def _split_generators(self, dl_manager: ds.DownloadManager):
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         tng_image_dirpath = os.path.join(downloaded_files["train"], "train2017")
+         val_image_dirpath = os.path.join(downloaded_files["val"], "val2017")
+
+         stuff_dirpath = downloaded_files["stuff_trainval"]
+         stuff_things_maps_dirpath = downloaded_files["stuffthingmaps_trainval"]
+         labels_path = downloaded_files["labels"]
+
+         tng_gen_kwargs: GenerateExamplesArguments = {
+             "image_dirpath": tng_image_dirpath,
+             "stuff_dirpath": stuff_dirpath,
+             "stuff_thing_maps_dirpath": stuff_things_maps_dirpath,
+             "labels_path": labels_path,
+             "split": "train",
+         }
+         val_gen_kwargs: GenerateExamplesArguments = {
+             "image_dirpath": val_image_dirpath,
+             "stuff_dirpath": stuff_dirpath,
+             "stuff_thing_maps_dirpath": stuff_things_maps_dirpath,
+             "labels_path": labels_path,
+             "split": "val",
+         }
+         return [
+             ds.SplitGenerator(
+                 name=ds.Split.TRAIN,  # type: ignore
+                 gen_kwargs=tng_gen_kwargs,  # type: ignore
+             ),
+             ds.SplitGenerator(
+                 name=ds.Split.VALIDATION,  # type: ignore
+                 gen_kwargs=val_gen_kwargs,  # type: ignore
+             ),
+         ]
+
+     def _generate_examples_for_stuff_thing(
+         self,
+         image_dirpath: str,
+         stuff_dirpath: str,
+         stuff_thing_maps_dirpath: str,
+         labels_path: str,
+         split: str,
+     ):
+         id_to_label = _load_labels(labels_path=labels_path)
+         stuff_json = self.load_stuff_json(stuff_dirpath=stuff_dirpath, split=split)
+
+         image_id_to_image_infos = self.get_image_id_to_image_infos(
+             images=copy.deepcopy(stuff_json["images"])
+         )
+         image_id_to_stuff_annotations = self.get_image_id_to_annotations(
+             annotations=copy.deepcopy(stuff_json["annotations"])
+         )
+
+         assert len(image_id_to_image_infos.keys()) >= len(
+             image_id_to_stuff_annotations.keys()
+         )
+
+         for image_id in image_id_to_stuff_annotations.keys():
+
+             img_info = image_id_to_image_infos[image_id]
+             image_filename = img_info["file_name"]
+             image_filepath = os.path.join(image_dirpath, image_filename)
+             img_example_dict = {
+                 "image": image_filepath,
+                 "image_id": image_id,
+                 "image_filename": image_filename,
+                 "width": img_info["width"],
+                 "height": img_info["height"],
+             }
+
+             img_anns = image_id_to_stuff_annotations[image_id]
+             bboxes = [list(map(int, ann["bbox"])) for ann in img_anns]
+             category_ids = [ann["category_id"] for ann in img_anns]
+             category_labels = list(
+                 map(
+                     lambda cat_id: id_to_label.get(cat_id, f"unknown-{cat_id}"),
+                     category_ids,
+                 )
+             )
+             assert len(bboxes) == len(category_ids) == len(category_labels)
+             zip_it = zip(bboxes, category_ids, category_labels)
+             objects_example = [
+                 {
+                     "object_id": category_id,
+                     "x": bbox[0],
+                     "y": bbox[1],
+                     "w": bbox[2],
+                     "h": bbox[3],
+                     "name": category_label,
+                 }
+                 for bbox, category_id, category_label in zip_it
+             ]
+
+             # The stuff map shares its basename with the image, but as a PNG.
+             root, _ = os.path.splitext(img_example_dict["image_filename"])
+             stuff_map_filepath = os.path.join(
+                 stuff_thing_maps_dirpath, f"{split}2017", f"{root}.png"
+             )
+
+             example_dict = {
+                 **img_example_dict,
+                 "objects": objects_example,
+                 "stuff_map": stuff_map_filepath,
+             }
+             yield image_id, example_dict
+
+     def _generate_examples_for_stuff_only(
+         self,
+         image_dirpath: str,
+         stuff_dirpath: str,
+         labels_path: str,
+         split: str,
+     ):
+         id_to_label = _load_labels(labels_path=labels_path)
+         stuff_json = self.load_stuff_json(stuff_dirpath=stuff_dirpath, split=split)
+
+         image_id_to_image_infos = self.get_image_id_to_image_infos(
+             images=copy.deepcopy(stuff_json["images"])
+         )
+         image_id_to_stuff_annotations = self.get_image_id_to_annotations(
+             annotations=copy.deepcopy(stuff_json["annotations"])
+         )
+
+         assert len(image_id_to_image_infos.keys()) >= len(
+             image_id_to_stuff_annotations.keys()
+         )
+
+         for image_id in image_id_to_stuff_annotations.keys():
+
+             img_info = image_id_to_image_infos[image_id]
+             image_filename = img_info["file_name"]
+             image_filepath = os.path.join(image_dirpath, image_filename)
+             img_example_dict = {
+                 "image": image_filepath,
+                 "image_id": image_id,
+                 "image_filename": image_filename,
+                 "width": img_info["width"],
+                 "height": img_info["height"],
+             }
+
+             img_anns = image_id_to_stuff_annotations[image_id]
+             bboxes = [list(map(int, ann["bbox"])) for ann in img_anns]
+             category_ids = [ann["category_id"] for ann in img_anns]
+             category_labels = list(
+                 map(
+                     lambda cat_id: id_to_label.get(cat_id, f"unknown-{cat_id}"),
+                     category_ids,
+                 )
+             )
+             assert len(bboxes) == len(category_ids) == len(category_labels)
+             zip_it = zip(bboxes, category_ids, category_labels)
+             objects_example = [
+                 {
+                     "object_id": category_id,
+                     "x": bbox[0],
+                     "y": bbox[1],
+                     "w": bbox[2],
+                     "h": bbox[3],
+                     "name": category_label,
+                 }
+                 for bbox, category_id, category_label in zip_it
+             ]
+
+             example_dict = {
+                 **img_example_dict,
+                 "objects": objects_example,
+             }
+             yield image_id, example_dict
+
+     def _generate_examples(  # type: ignore
+         self,
+         image_dirpath: str,
+         stuff_dirpath: str,
+         stuff_thing_maps_dirpath: str,
+         labels_path: str,
+         split: str,
+     ):
+         logger.info(f"Generating examples for {split}.")
+
+         if "stuff-thing" in self.config.name:
+             return self._generate_examples_for_stuff_thing(
+                 image_dirpath=image_dirpath,
+                 stuff_dirpath=stuff_dirpath,
+                 stuff_thing_maps_dirpath=stuff_thing_maps_dirpath,
+                 labels_path=labels_path,
+                 split=split,
+             )
+         elif "stuff-only" in self.config.name:
+             return self._generate_examples_for_stuff_only(
+                 image_dirpath=image_dirpath,
+                 stuff_dirpath=stuff_dirpath,
+                 labels_path=labels_path,
+                 split=split,
+             )
+         else:
+             raise ValueError(f"Invalid dataset name: {self.config.name}")
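+
+
+ if __name__ == "__main__":
+     # Usage sketch (an illustrative addition): load the dataset from this
+     # local script, the same way tests/cocostuff_test.py does. Assumes the
+     # repository root as the working directory; the first run downloads the
+     # COCO 2017 images and the COCO-Stuff annotation archives.
+     dataset = ds.load_dataset("cocostuff.py", name="stuff-only", split="validation")
+     print(dataset.num_rows)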
poetry.lock ADDED
The diff for this file is too large to render.
 
pyproject.toml ADDED
@@ -0,0 +1,24 @@
+ [tool.poetry]
+ name = "huggingface-datasets-cocostuff"
+ version = "0.1.0"
+ description = ""
+ authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
+ readme = "README.md"
+ packages = []
+
+ [tool.poetry.dependencies]
+ python = "^3.8"
+ datasets = "^2.6.1"
+ pillow = "^9.3.0"
+
+
+ [tool.poetry.group.dev.dependencies]
+ black = "^22.10.0"
+ isort = "^5.10.1"
+ flake8 = "^5.0.4"
+ mypy = "^0.982"
+ pytest = "^7.2.0"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
tests/__init__.py ADDED
File without changes
tests/cocostuff_test.py ADDED
@@ -0,0 +1,61 @@
+ import logging
+ import os
+
+ import datasets as ds
+ import pytest
+
+ logging.basicConfig(
+     format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
+ )
+
+
+ @pytest.fixture
+ def dataset_path() -> str:
+     return "cocostuff.py"
+
+
+ @pytest.mark.skipif(
+     bool(os.environ.get("CI", False)),
+     reason="Because this test downloads a large data set, we will skip running it on CI.",
+ )
+ def test_load_stuff_thing_dataset(dataset_path: str):
+     dataset = ds.load_dataset(path=dataset_path, name="stuff-thing")
+
+     expected_features = [
+         "image",
+         "image_id",
+         "image_filename",
+         "width",
+         "height",
+         "stuff_map",
+         "objects",
+     ]
+     for expected_feature in expected_features:
+         assert expected_feature in dataset["train"].features.keys()  # type: ignore
+         assert expected_feature in dataset["validation"].features.keys()  # type: ignore
+
+     assert dataset["train"].num_rows == 118280  # type: ignore
+     assert dataset["validation"].num_rows == 5000  # type: ignore
+
+
+ @pytest.mark.skipif(
+     bool(os.environ.get("CI", False)),
+     reason="Because this test downloads a large data set, we will skip running it on CI.",
+ )
+ def test_load_stuff_only_dataset(dataset_path: str):
+     dataset = ds.load_dataset(path=dataset_path, name="stuff-only")
+
+     expected_features = [
+         "image",
+         "image_id",
+         "image_filename",
+         "width",
+         "height",
+         "objects",
+     ]
+     for expected_feature in expected_features:
+         assert expected_feature in dataset["train"].features.keys()  # type: ignore
+         assert expected_feature in dataset["validation"].features.keys()  # type: ignore
+
+     assert dataset["train"].num_rows == 118280  # type: ignore
+     assert dataset["validation"].num_rows == 5000  # type: ignore