dolfim-ibm committed
Commit 2501633
1 Parent(s): 8c2ec74

Add doclaynet load script and README skeleton

Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>

Files changed (3)
  1. .gitignore +304 -0
  2. README.md +139 -1
  3. doclaynet.py +206 -0
.gitignore ADDED
@@ -0,0 +1,304 @@
+ # Created by https://www.gitignore.io/api/linux,macos,python,windows,pycharm+all,visualstudiocode,virtualenv
+ # Edit at https://www.gitignore.io/?templates=linux,macos,python,windows,pycharm+all,visualstudiocode,virtualenv
+
+ ### Linux ###
+ *~
+
+ # temporary files which can be created if a process still has a handle open of a deleted file
+ .fuse_hidden*
+
+ # KDE directory preferences
+ .directory
+
+ # Linux trash folder which might appear on any partition or disk
+ .Trash-*
+
+ # .nfs files are created when an open file is removed but is still being accessed
+ .nfs*
+
+ ### macOS ###
+ # General
+ .DS_Store
+ .AppleDouble
+ .LSOverride
+
+ # Icon must end with two \r
+ Icon
+
+ # Thumbnails
+ ._*
+
+ # Files that might appear in the root of a volume
+ .DocumentRevisions-V100
+ .fseventsd
+ .Spotlight-V100
+ .TemporaryItems
+ .Trashes
+ .VolumeIcon.icns
+ .com.apple.timemachine.donotpresent
+
+ # Directories potentially created on remote AFP share
+ .AppleDB
+ .AppleDesktop
+ Network Trash Folder
+ Temporary Items
+ .apdisk
+
+ ### PyCharm+all ###
+ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+ # User-specific stuff
+ .idea/**/workspace.xml
+ .idea/**/tasks.xml
+ .idea/**/usage.statistics.xml
+ .idea/**/dictionaries
+ .idea/**/shelf
+
+ # Generated files
+ .idea/**/contentModel.xml
+
+ # Sensitive or high-churn files
+ .idea/**/dataSources/
+ .idea/**/dataSources.ids
+ .idea/**/dataSources.local.xml
+ .idea/**/sqlDataSources.xml
+ .idea/**/dynamic.xml
+ .idea/**/uiDesigner.xml
+ .idea/**/dbnavigator.xml
+
+ # Gradle
+ .idea/**/gradle.xml
+ .idea/**/libraries
+
+ # Gradle and Maven with auto-import
+ # When using Gradle or Maven with auto-import, you should exclude module files,
+ # since they will be recreated, and may cause churn. Uncomment if using
+ # auto-import.
+ # .idea/modules.xml
+ # .idea/*.iml
+ # .idea/modules
+ # *.iml
+ # *.ipr
+
+ # CMake
+ cmake-build-*/
+
+ # Mongo Explorer plugin
+ .idea/**/mongoSettings.xml
+
+ # File-based project format
+ *.iws
+
+ # IntelliJ
+ out/
+
+ # mpeltonen/sbt-idea plugin
+ .idea_modules/
+
+ # JIRA plugin
+ atlassian-ide-plugin.xml
+
+ # Cursive Clojure plugin
+ .idea/replstate.xml
+
+ # Crashlytics plugin (for Android Studio and IntelliJ)
+ com_crashlytics_export_strings.xml
+ crashlytics.properties
+ crashlytics-build.properties
+ fabric.properties
+
+ # Editor-based Rest Client
+ .idea/httpRequests
+
+ # Android studio 3.1+ serialized cache file
+ .idea/caches/build_file_checksums.ser
+
+ ### PyCharm+all Patch ###
+ # Ignores the whole .idea folder and all .iml files
+ # See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
+
+ .idea/
+
+ # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
+
+ *.iml
+ modules.xml
+ .idea/misc.xml
+ *.ipr
+
+ # Sonarlint plugin
+ .idea/sonarlint
+
+ ### Python ###
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # Mr Developer
+ .mr.developer.cfg
+ .project
+ .pydevproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ ### VirtualEnv ###
+ # Virtualenv
+ # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
+ pyvenv.cfg
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+ pip-selfcheck.json
+
+ ### VisualStudioCode ###
+ .vscode/*
+
+ ### VisualStudioCode Patch ###
+ # Ignore all local history of files
+ .history
+
+ ### Windows ###
+ # Windows thumbnail cache files
+ Thumbs.db
+ Thumbs.db:encryptable
+ ehthumbs.db
+ ehthumbs_vista.db
+
+ # Dump file
+ *.stackdump
+
+ # Folder config file
+ [Dd]esktop.ini
+
+ # Recycle Bin used on file shares
+ $RECYCLE.BIN/
+
+ # Windows Installer files
+ *.cab
+ *.msi
+ *.msix
+ *.msm
+ *.msp
+
+ # Windows shortcuts
+ *.lnk
+
+ # End of https://www.gitignore.io/api/linux,macos,python,windows,pycharm+all,visualstudiocode,virtualenv
+
+
+ # Created by https://www.toptal.com/developers/gitignore/api/jupyternotebooks
+ # Edit at https://www.toptal.com/developers/gitignore?templates=jupyternotebooks
+
+ ### JupyterNotebooks ###
+ # gitignore template for Jupyter Notebooks
+ # website: http://jupyter.org/
+
+ .ipynb_checkpoints
+ */.ipynb_checkpoints/*
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # Remove previous ipynb_checkpoints
+ # git rm -r .ipynb_checkpoints/
+
+ # End of https://www.toptal.com/developers/gitignore/api/jupyternotebooks
README.md CHANGED
@@ -1,3 +1,141 @@
  ---
- license: other
+ annotations_creators:
+ - crowdsourced
+ license: CDLA-Permissive-1.0
+ pretty_name: DocLayNet
+ size_categories:
+ - 10K<n<100K
+ tags:
+ - layout-segmentation
+ - COCO
+ - document-understanding
+ - PDF
+ task_categories:
+ - object-detection
+ - image-segmentation
+ task_ids:
+ - instance-segmentation
  ---
+
+ # Dataset Card for DocLayNet
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:**
+ - **Repository:**
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:**
+
+ ### Dataset Summary
+
+ [More Information Needed]
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ ### Languages
+
+ [More Information Needed]
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ [More Information Needed]
+
+ ### Data Fields
+
+ [More Information Needed]
+
+ ### Data Splits
+
+ [More Information Needed]
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ ### Citation Information
+
+ [More Information Needed]
+
+ ### Contributions
+
+ Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
doclaynet.py ADDED
@@ -0,0 +1,206 @@
+ """
+ Inspired from
+ https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_dataset_script.py
+ """
+
+ import json
+ import os
+ import datasets
+
+
+ class COCOBuilderConfig(datasets.BuilderConfig):
+
+     def __init__(self, name, splits, **kwargs):
+         super().__init__(name, **kwargs)
+         self.splits = splits
+
+
+ # Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @article{doclaynet2022,
+ title = {DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis},
+ doi = {10.1145/3534678.353904},
+ url = {https://arxiv.org/abs/2206.01062},
+ author = {Pfitzmann, Birgit and Auer, Christoph and Dolfi, Michele and Nassar, Ahmed S and Staar, Peter W J},
+ year = {2022}
+ }
+ """
+
+ # Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ DocLayNet is a human-annotated document layout segmentation dataset from a broad variety of document sources.
+ """
+
+ # Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://developer.ibm.com/exchanges/data/all/doclaynet/"
+
+ # Add the licence for the dataset here if you can find it
+ _LICENSE = "CDLA-Permissive-1.0"
+
+ # Add link to the official dataset URLs here
+ # The HuggingFace datasets library doesn't host the datasets but only points to the original files
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+
+ # This script is supposed to work with local (downloaded) COCO dataset.
+ _URLs = {
+     "core": "https://codait-cos-dax.s3.us.cloud-object-storage.appdomain.cloud/dax-doclaynet/1.0.0/DocLayNet_core.zip",
+ }
+
+
+ # Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class COCODataset(datasets.GeneratorBasedBuilder):
+     """An example dataset script to work with the local (downloaded) COCO dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIG_CLASS = COCOBuilderConfig
+     BUILDER_CONFIGS = [
+         COCOBuilderConfig(name='2022.08', splits=['train', 'val', 'test']),
+     ]
+     DEFAULT_CONFIG_NAME = "2022.08"
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+
+         feature_dict = {
+             "id": datasets.Value("int64"),
+             "height": datasets.Value("int64"),
+             "width": datasets.Value("int64"),
+             "file_name": datasets.Value("string"),
+
+             # Custom fields
+             "doc_category": datasets.Value("string"),  # high-level document category
+             "collection": datasets.Value("string"),  # sub-collection name
+             "doc_name": datasets.Value("string"),  # original document filename
+             "page_no": datasets.Value("int64"),  # page number in original document
+             # "precedence": datasets.Value("int64"),  # annotation order, non-zero in case of redundant double- or triple-annotation
+         }
+
+         features = datasets.Features(feature_dict)
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # data_dir = self.config.data_dir
+         # if not data_dir:
+         #     raise ValueError(
+         #         "This script is supposed to work with local (downloaded) COCO dataset. The argument `data_dir` in `load_dataset()` is required."
+         #     )
+
+         # _DL_URLS = {
+         #     "train": os.path.join(data_dir, "train2017.zip"),
+         #     "val": os.path.join(data_dir, "val2017.zip"),
+         #     "test": os.path.join(data_dir, "test2017.zip"),
+         #     "annotations_trainval": os.path.join(data_dir, "annotations_trainval2017.zip"),
+         #     "image_info_test": os.path.join(data_dir, "image_info_test2017.zip"),
+         # }
+         archive_path = dl_manager.download_and_extract(_URLs)
+         print("archive_path: ", archive_path)
+
+         splits = []
+         for split in self.config.splits:
+             if split == 'train':
+                 dataset = datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "json_path": os.path.join(archive_path["core"], "COCO", "train.json"),
+                         "image_dir": os.path.join(archive_path["core"], "PNG"),
+                         "split": "train",
+                     }
+                 )
+             elif split in ['val', 'valid', 'validation', 'dev']:
+                 dataset = datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "json_path": os.path.join(archive_path["core"], "COCO", "val.json"),
+                         "image_dir": os.path.join(archive_path["core"], "PNG"),
+                         "split": "val",
+                     },
+                 )
+             elif split == 'test':
+                 dataset = datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "json_path": os.path.join(archive_path["core"], "COCO", "test.json"),
+                         "image_dir": os.path.join(archive_path["core"], "PNG"),
+                         "split": "test",
+                     },
+                 )
+             else:
+                 continue
+
+             splits.append(dataset)
+
+         return splits
+
+     def _generate_examples(
+         # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+         self, json_path, image_dir, split
+     ):
+         """ Yields examples as (key, example) tuples. """
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
+         _features = ["image_id", "image_path", "doc_category", "collection", "height", "width", "file_name", "doc_name", "page_no", "id"]
+         features = list(_features)
+
+         with open(json_path, 'r', encoding='UTF-8') as fp:
+             data = json.load(fp)
+
+         # list of dict
+         images = data["images"]
+         entries = images
+
+         # build a dict of image_id -> image info dict
+         d = {image["id"]: image for image in images}
+
+         # list of dict
+         if split in ["train", "val"]:
+             annotations = data["annotations"]
+
+             # build a dict of image_id ->
+             for annotation in annotations:
+                 _id = annotation["id"]
+                 image_info = d[annotation["image_id"]]
+                 annotation.update(image_info)
+                 annotation["id"] = _id
+
+             entries = annotations
+
+         for id_, entry in enumerate(entries):
+
+             entry = {k: v for k, v in entry.items() if k in features}
+
+             if split == "test":
+                 entry["image_id"] = entry["id"]
+                 entry["id"] = -1
+
+             entry["image_path"] = os.path.join(image_dir, entry["file_name"])
+
+             entry = {k: entry[k] for k in _features if k in entry}
+
+             yield str((entry["image_id"], entry["id"])), entry
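A minimal usage sketch for the loading script above (not part of the commit; it assumes a local copy of the file saved as doclaynet.py and a version of the `datasets` library that still supports script-based loading; the config name "2022.08" and the field names are taken from the script itself, and the first call downloads the full DocLayNet_core.zip):

from datasets import load_dataset

# Load the "2022.08" configuration defined in BUILDER_CONFIGS; the core
# archive is fetched and extracted by the download manager on first use.
ds = load_dataset("doclaynet.py", "2022.08", split="train")

# Each record carries the page-level metadata declared in _info(), e.g.
# the page image file name and the high-level document category.
example = ds[0]
print(example["file_name"], example["doc_category"], example["width"], example["height"])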