PHilita committed on
Commit 594180e
1 Parent(s): 55b58cd
Files changed (1)
  1. Carla-COCO-Object-Detection-Dataset.py +193 -193
Carla-COCO-Object-Detection-Dataset.py CHANGED
@@ -1,158 +1,19 @@
- # coding=utf-8
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """CPPE-5 dataset."""
-
-
- import collections
- import json
- import os
-
- import datasets
-
-
- _CITATION = """\
- @misc{dagli2021cppe5,
-       title={CPPE-5: Medical Personal Protective Equipment Dataset},
-       author={Rishit Dagli and Ali Mustufa Shaikh},
-       year={2021},
-       eprint={2112.09569},
-       archivePrefix={arXiv},
-       primaryClass={cs.CV}
- }
- """
-
- _DESCRIPTION = """\
- CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
- to allow the study of subordinate categorization of medical personal protective equipments,
- which is not possible with other popular data sets that focus on broad level categories.
- """
-
- _HOMEPAGE = "https://sites.google.com/view/cppe5"
-
- _LICENSE = "Unknown"
-
- _URL = "https://drive.google.com/uc?id=1QeveFt1jDNrafJeeCV1N_KoIKQEZyhuf"
-
- _CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
-
-
- class CPPE5(datasets.GeneratorBasedBuilder):
-     """CPPE - 5 dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "image_id": datasets.Value("int64"),
-                 "image": datasets.Image(),
-                 "width": datasets.Value("int32"),
-                 "height": datasets.Value("int32"),
-                 "objects": datasets.Sequence(
-                     {
-                         "id": datasets.Value("int64"),
-                         "area": datasets.Value("int64"),
-                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-                         "category": datasets.ClassLabel(names=_CATEGORIES),
-                     }
-                 ),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         archive = dl_manager.download(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "annotation_file_path": "annotations/train.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "annotation_file_path": "annotations/test.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, annotation_file_path, files):
-         def process_annot(annot, category_id_to_category):
-             return {
-                 "id": annot["id"],
-                 "area": annot["area"],
-                 "bbox": annot["bbox"],
-                 "category": category_id_to_category[annot["category_id"]],
-             }
-
-         image_id_to_image = {}
-         idx = 0
-         # This loop relies on the ordering of the files in the archive:
-         # Annotation files come first, then the images.
-         for path, f in files:
-             file_name = os.path.basename(path)
-             if path == annotation_file_path:
-                 annotations = json.load(f)
-                 category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-                 image_id_to_annotations = collections.defaultdict(list)
-                 for annot in annotations["annotations"]:
-                     image_id_to_annotations[annot["image_id"]].append(annot)
-                 image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
-             elif file_name in image_id_to_image:
-                 image = image_id_to_image[file_name]
-                 objects = [
-                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
-                 ]
-                 yield idx, {
-                     "image_id": image["id"],
-                     "image": {"path": path, "bytes": f.read()},
-                     "width": image["width"],
-                     "height": image["height"],
-                     "objects": objects,
-                 }
-                 idx += 1
-
  # # coding=utf-8
- # # Permission is hereby granted, free of charge, to any person obtaining
- # # a copy of this software and associated documentation files (the
- # # "Software"), to deal in the Software without restriction, including
- # # without limitation the rights to use, copy, modify, merge, publish,
- # # distribute, sublicense, and/or sell copies of the Software, and to
- # # permit persons to whom the Software is furnished to do so, subject to
- # # the following conditions:
-
- # # The above copyright notice and this permission notice shall be
- # # included in all copies or substantial portions of the Software.
-
- # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- # # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- # # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- # # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- # # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- # # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- # # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- # """Carla-COCO-Object-Detection-Dataset"""
+ # # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ # #
+ # # Licensed under the Apache License, Version 2.0 (the "License");
+ # # you may not use this file except in compliance with the License.
+ # # You may obtain a copy of the License at
+ # #
+ # #     http://www.apache.org/licenses/LICENSE-2.0
+ # #
+ # # Unless required by applicable law or agreed to in writing, software
+ # # distributed under the License is distributed on an "AS IS" BASIS,
+ # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # # See the License for the specific language governing permissions and
+ # # limitations under the License.
+ # """CPPE-5 dataset."""
+

  # import collections
  # import json
@@ -161,48 +22,50 @@ class CPPE5(datasets.GeneratorBasedBuilder):
  # import datasets


- # logger = datasets.logging.get_logger(__name__)
+ # _CITATION = """\
+ # @misc{dagli2021cppe5,
+ #       title={CPPE-5: Medical Personal Protective Equipment Dataset},
+ #       author={Rishit Dagli and Ali Mustufa Shaikh},
+ #       year={2021},
+ #       eprint={2112.09569},
+ #       archivePrefix={arXiv},
+ #       primaryClass={cs.CV}
+ # }
+ # """

  # _DESCRIPTION = """\
- # This dataset contains 1028 images each 640x380 pixels.
- # The dataset is split into 249 test and 779 training examples.
- # Every image comes with MS COCO format annotations.
- # The dataset was collected in Carla Simulator, driving around in autopilot mode in various environments
- # (Town01, Town02, Town03, Town04, Town05) and saving every i-th frame.
- # The labels where then automatically generated using the semantic segmentation information.
+ # CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
+ # to allow the study of subordinate categorization of medical personal protective equipments,
+ # which is not possible with other popular data sets that focus on broad level categories.
  # """

- # _HOMEPAGE = "https://github.com/yunusskeete/Carla-COCO-Object-Detection-Dataset"
+ # _HOMEPAGE = "https://sites.google.com/view/cppe5"

- # _LICENSE = "MIT"
+ # _LICENSE = "Unknown"

- # _URL = "https://drive.google.com/file/d/1xUPwrMBBrGFIapLx_fyLjmH4HN16A4iZ"
+ # _URL = "https://drive.google.com/uc?id=1QeveFt1jDNrafJeeCV1N_KoIKQEZyhuf"

- # _CATEGORIES = ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"]
+ # _CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]

- # class CARLA_COCO(datasets.GeneratorBasedBuilder):
- # """Carla-COCO-Object-Detection-Dataset"""

- # VERSION = datasets.Version("1.1.0")
+ # class CPPE5(datasets.GeneratorBasedBuilder):
+ # """CPPE - 5 dataset."""

- # def _info(self):
- # """This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset"""
+ # VERSION = datasets.Version("1.0.0")

+ # def _info(self):
  # features = datasets.Features(
  # {
- # "id": datasets.Value("int64"),
- # "image_id": datasets.Value("string"),
+ # "image_id": datasets.Value("int64"),
  # "image": datasets.Image(),
  # "width": datasets.Value("int32"),
  # "height": datasets.Value("int32"),
- # "file_name": datasets.Value("string"),
- # "url": datasets.Value("string"),
  # "objects": datasets.Sequence(
  # {
- # "id": datasets.Sequence(datasets.Value("int64")),
- # "area": datasets.Sequence(datasets.Value("int64")),
+ # "id": datasets.Value("int64"),
+ # "area": datasets.Value("int64"),
  # "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
- # "category": datasets.Sequence(datasets.ClassLabel(names=_CATEGORIES)),
+ # "category": datasets.ClassLabel(names=_CATEGORIES),
  # }
  # ),
  # }
@@ -212,41 +75,29 @@ class CPPE5(datasets.GeneratorBasedBuilder):
  # features=features,
  # homepage=_HOMEPAGE,
  # license=_LICENSE,
+ # citation=_CITATION,
  # )

  # def _split_generators(self, dl_manager):
- # """This method is tasked with downloading/extracting the data and defining the splits depending on the configuration"""
-
- # archive = dl_manager.download_and_extract(_URL)
-
+ # archive = dl_manager.download(_URL)
  # return [
  # datasets.SplitGenerator(
  # name=datasets.Split.TRAIN,
- # # These kwargs will be passed to _generate_examples
  # gen_kwargs={
  # "annotation_file_path": "annotations/train.json",
  # "files": dl_manager.iter_archive(archive),
- # }
+ # },
  # ),
  # datasets.SplitGenerator(
  # name=datasets.Split.TEST,
- # # These kwargs will be passed to _generate_examples
  # gen_kwargs={
  # "annotation_file_path": "annotations/test.json",
  # "files": dl_manager.iter_archive(archive),
- # }
+ # },
  # ),
  # ]

- # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
  # def _generate_examples(self, annotation_file_path, files):
- # """
- # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
- # """
-
- # logger.info("generating examples from = %s", annotation_file_path)
-
  # def process_annot(annot, category_id_to_category):
  # return {
  # "id": annot["id"],
@@ -281,3 +132,152 @@ class CPPE5(datasets.GeneratorBasedBuilder):
  # "objects": objects,
  # }
  # idx += 1
+
+ # coding=utf-8
+ # Permission is hereby granted, free of charge, to any person obtaining
+ # a copy of this software and associated documentation files (the
+ # "Software"), to deal in the Software without restriction, including
+ # without limitation the rights to use, copy, modify, merge, publish,
+ # distribute, sublicense, and/or sell copies of the Software, and to
+ # permit persons to whom the Software is furnished to do so, subject to
+ # the following conditions:
+
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ """Carla-COCO-Object-Detection-Dataset"""
+
+ import collections
+ import json
+ import os
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ This dataset contains 1028 images each 640x380 pixels.
+ The dataset is split into 249 test and 779 training examples.
+ Every image comes with MS COCO format annotations.
+ The dataset was collected in Carla Simulator, driving around in autopilot mode in various environments
+ (Town01, Town02, Town03, Town04, Town05) and saving every i-th frame.
+ The labels where then automatically generated using the semantic segmentation information.
+ """
+
+ _HOMEPAGE = "https://github.com/yunusskeete/Carla-COCO-Object-Detection-Dataset"
+
+ _LICENSE = "MIT"
+
+ _URL = "https://drive.google.com/uc?id=1xUPwrMBBrGFIapLx_fyLjmH4HN16A4iZ"
+
+ _CATEGORIES = ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"]
+
+ class CARLA_COCO(datasets.GeneratorBasedBuilder):
+     """Carla-COCO-Object-Detection-Dataset"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         """This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset"""
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("int64"),
+                 "image_id": datasets.Value("string"),
+                 "image": datasets.Image(),
+                 "width": datasets.Value("int32"),
+                 "height": datasets.Value("int32"),
+                 "file_name": datasets.Value("string"),
+                 "url": datasets.Value("string"),
+                 "objects": datasets.Sequence(
+                     {
+                         "id": datasets.Sequence(datasets.Value("int64")),
+                         "area": datasets.Sequence(datasets.Value("int64")),
+                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                         "category": datasets.Sequence(datasets.ClassLabel(names=_CATEGORIES)),
+                     }
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         """This method is tasked with downloading/extracting the data and defining the splits depending on the configuration"""
+
+         archive = dl_manager.download_and_extract(_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "annotation_file_path": "annotations/train.json",
+                     "files": dl_manager.iter_archive(archive),
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "annotation_file_path": "annotations/test.json",
+                     "files": dl_manager.iter_archive(archive),
+                 }
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, annotation_file_path, files):
+         """
+         This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         """
+
+         logger.info("generating examples from = %s", annotation_file_path)
+
+         def process_annot(annot, category_id_to_category):
+             return {
+                 "id": annot["id"],
+                 "area": annot["area"],
+                 "bbox": annot["bbox"],
+                 "category": category_id_to_category[annot["category_id"]],
+             }
+
+         image_id_to_image = {}
+         idx = 0
+         # This loop relies on the ordering of the files in the archive:
+         # Annotation files come first, then the images.
+         for path, f in files:
+             file_name = os.path.basename(path)
+             if path == annotation_file_path:
+                 annotations = json.load(f)
+                 category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
+                 image_id_to_annotations = collections.defaultdict(list)
+                 for annot in annotations["annotations"]:
+                     image_id_to_annotations[annot["image_id"]].append(annot)
+                 image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
+             elif file_name in image_id_to_image:
+                 image = image_id_to_image[file_name]
+                 objects = [
+                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
+                 ]
+                 yield idx, {
+                     "image_id": image["id"],
+                     "image": {"path": path, "bytes": f.read()},
+                     "width": image["width"],
+                     "height": image["height"],
+                     "objects": objects,
+                 }
+                 idx += 1
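
For reference, after this commit the file is a standard `datasets` loading script, so the dataset would typically be consumed roughly as sketched below. This is a minimal sketch, not part of the commit: the Hub repo id is an assumption based on the script's `_HOMEPAGE`, and depending on the installed `datasets` version, `trust_remote_code=True` may be required to run a script-based dataset (the newest major versions may not execute loading scripts at all).

from datasets import load_dataset

# Repo id assumed from _HOMEPAGE; adjust to the actual Hub location.
ds = load_dataset(
    "yunusskeete/Carla-COCO-Object-Detection-Dataset",
    trust_remote_code=True,  # needed on recent `datasets` versions for script datasets
)

print(ds)  # expected: DatasetDict with "train" (779 rows) and "test" (249 rows), per _DESCRIPTION

example = ds["train"][0]
print(example["image_id"], example["width"], example["height"])  # images are 640x380
print(example["objects"])  # dict of per-object lists: id, area, bbox (COCO-style [x, y, w, h]), category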