fuliucansheng committed on
Commit
2b5cf97
1 Parent(s): e355620

add dataset minicoco

Browse files
Files changed (3) hide show
  1. .gitattributes +1 -0
  2. minicoco.py +531 -0
  3. minicoco.tar.gz +3 -0
.gitattributes CHANGED
@@ -14,3 +14,4 @@
14
  *.pb filter=lfs diff=lfs merge=lfs -text
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
 
14
  *.pb filter=lfs diff=lfs merge=lfs -text
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
17
+ ./minicoco.tar.gz filter=lfs diff=lfs merge=lfs -text
minicoco.py ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import logging
4
+ import datasets
5
+ import xml.etree.ElementTree as ET
6
+ from collections import defaultdict
7
+
8
+
9
_CITATION = """
MINICOCO2017
"""

_DESCRIPTION = """
MINICOCO2017
"""

_URLS = {
    "minicoco2017": "minicoco.tar.gz"
}

# fmt: off
# (class name, COCO category id) pairs, in training order. COCO category
# ids are sparse (gaps at 12, 26, 29-30, ...); the contiguous train id is
# simply the position of the pair in this table.
_CLASS_NAME_ID_PAIRS = [
    ("person", 1), ("bicycle", 2), ("car", 3), ("motorcycle", 4),
    ("airplane", 5), ("bus", 6), ("train", 7), ("truck", 8),
    ("boat", 9), ("traffic light", 10), ("fire hydrant", 11), ("stop sign", 13),
    ("parking meter", 14), ("bench", 15), ("bird", 16), ("cat", 17),
    ("dog", 18), ("horse", 19), ("sheep", 20), ("cow", 21),
    ("elephant", 22), ("bear", 23), ("zebra", 24), ("giraffe", 25),
    ("backpack", 27), ("umbrella", 28), ("handbag", 31), ("tie", 32),
    ("suitcase", 33), ("frisbee", 34), ("skis", 35), ("snowboard", 36),
    ("sports ball", 37), ("kite", 38), ("baseball bat", 39), ("baseball glove", 40),
    ("skateboard", 41), ("surfboard", 42), ("tennis racket", 43), ("bottle", 44),
    ("wine glass", 46), ("cup", 47), ("fork", 48), ("knife", 49),
    ("spoon", 50), ("bowl", 51), ("banana", 52), ("apple", 53),
    ("sandwich", 54), ("orange", 55), ("broccoli", 56), ("carrot", 57),
    ("hot dog", 58), ("pizza", 59), ("donut", 60), ("cake", 61),
    ("chair", 62), ("couch", 63), ("potted plant", 64), ("bed", 65),
    ("dining table", 67), ("toilet", 70), ("tv", 72), ("laptop", 73),
    ("mouse", 74), ("remote", 75), ("keyboard", 76), ("cell phone", 77),
    ("microwave", 78), ("oven", 79), ("toaster", 80), ("sink", 81),
    ("refrigerator", 82), ("book", 84), ("clock", 85), ("vase", 86),
    ("scissors", 87), ("teddy bear", 88), ("hair drier", 89), ("toothbrush", 90),
]

# Keypoint names in id order. Here ids are dense: id = index + 1 and the
# train id equals the index (the extra leading "none" entry shifts the
# standard 17 COCO person keypoints by one).
_KEYPOINT_NAMES = [
    "none",
    "nose", "left_eye", "right_eye", "left_ear", "right_ear",
    "left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
    "left_wrist", "right_wrist", "left_hip", "right_hip",
    "left_knee", "right_knee", "left_ankle", "right_ankle",
]
# fmt: on

# (name, category id, train id) triples — same layout as before.
CLASS_INFOS = [
    (name, category_id, train_id)
    for train_id, (name, category_id) in enumerate(_CLASS_NAME_ID_PAIRS)
]
KEYPOINTS_INFOS = [
    (name, index + 1, index) for index, name in enumerate(_KEYPOINT_NAMES)
]

CLASS_NAMES = [name for name, _, _ in CLASS_INFOS]
KEYPOINTS_NAMES = [name for name, _, _ in KEYPOINTS_INFOS]

# name -> contiguous train id
CLASS_DICT = {name: train_id for name, _, train_id in CLASS_INFOS}
# raw COCO category id -> name
CATEGORY_ID2CLASS_NAMES = {category_id: name for name, category_id, _ in CLASS_INFOS}
# keypoint name -> 1-based keypoint id
KEYPOINTS_DICT = {name: keypoint_id for name, keypoint_id, _ in KEYPOINTS_INFOS}
153
+
154
+
155
# datasets.Features
# Feature schemas for each supported task. Every task shares the same
# per-image fields (id / image path / height / width); tasks differ only
# in their annotation payload, so the shared pieces are factored out.


def _image_schema():
    # Per-image fields common to all four task schemas.
    return {
        "id": datasets.Value("int32"),
        "image": datasets.Value("string"),
        "height": datasets.Value("int32"),
        "width": datasets.Value("int32"),
    }


def _objects_schema(**extra_fields):
    # Per-object annotation sequence: bbox + class label, plus any
    # task-specific extras (segmentation polygon, keypoints, ...).
    fields = {
        "bboxes": datasets.Sequence(datasets.Value("float32")),
        "classes": datasets.features.ClassLabel(names=CLASS_NAMES),
    }
    fields.update(extra_fields)
    return datasets.features.Sequence(fields)


detection_features = datasets.Features(
    {**_image_schema(), "objects": _objects_schema()}
)

segmentation_features = datasets.Features(
    {
        **_image_schema(),
        "objects": _objects_schema(
            segmentation=datasets.Sequence(datasets.Value("float32")),
            iscrowd=datasets.Value("int32"),
        ),
    }
)

captions_features = datasets.Features(
    {
        **_image_schema(),
        "captions": datasets.features.Sequence(datasets.Value("string")),
    }
)

# NOTE (translated from the original Chinese comment): there may be a small
# issue here — the keypoint label classes are not wired into the schema yet.
# It is a bit involved and will be fine-tuned later; the basic fields below
# are already correct.
keypoint_features = datasets.Features(
    {
        **_image_schema(),
        "objects": _objects_schema(
            keypoints=datasets.Sequence(datasets.Value("float32")),
            num_keypoints=datasets.Value("int32"),
        ),
    }
)

# Task name -> feature schema, consumed by the builder's _info().
_DATASET_FEATURES = {
    "detection": detection_features,
    "segmentation": segmentation_features,
    "caption": captions_features,
    "keypoint": keypoint_features,
}
223
+
224
+
225
def get_captions_annotation(captions_path):
    """Parse a COCO ``captions_<split>2017.json`` annotation file.

    Args:
        captions_path: path to the captions JSON file.

    Returns:
        ``(anno_infos, images_infos)`` where ``anno_infos`` maps an
        image id to the list of caption strings for that image, and
        ``images_infos`` is a list of
        ``{"image_name", "height", "width", "image_id"}`` dicts, one per
        image entry in the file.
    """
    with open(captions_path, "r") as handle:
        raw = json.load(handle)

    # Group captions by the image they describe.
    # Example annotation: {'image_id': 179765, 'id': 38,
    #   'caption': 'A black Honda motorcycle parked in front of a garage.'}
    anno_infos = defaultdict(list)
    for annotation in raw["annotations"]:
        anno_infos[annotation["image_id"]].append(annotation["caption"])

    # Flatten each image record down to the fields the generator needs.
    # Example image: {'license': 4, 'file_name': '000000397133.jpg',
    #   'height': 427, 'width': 640, 'id': 397133, ...}
    images_infos = [
        {
            "image_name": record["file_name"],
            "height": record["height"],
            "width": record["width"],
            "image_id": record["id"],
        }
        for record in raw["images"]
    ]

    return anno_infos, images_infos
249
+
250
+
251
def get_instances_annotation(instances_path):
    """Parse a COCO ``instances_<split>2017.json`` annotation file.

    Args:
        instances_path: path to the instances JSON file.

    Returns:
        ``(anno_infos, images_infos)`` where ``anno_infos`` maps an image id
        to a list of per-object dicts
        (``{"segmentation", "bbox", "iscrowd", "classes"}``) and
        ``images_infos`` is a list of
        ``{"image_name", "height", "width", "image_id"}`` dicts, one per
        image entry in the file.
    """
    with open(instances_path, "r") as f:
        anno_instances = json.load(f)

    # defaultdict removes the duplicated "first object vs append" branches
    # and makes images without any annotation map to an empty list instead
    # of raising KeyError downstream.
    anno_infos = defaultdict(list)
    images_infos = list()

    for instance_info in anno_instances["annotations"]:
        # Example annotation:
        # {'segmentation': [[510.66, 423.01, ...]], 'area': 702.1,
        #  'iscrowd': 0, 'image_id': 289343,
        #  'bbox': [473.07, 395.93, 38.65, 28.67], 'category_id': 18, 'id': 1768}
        segmentation = instance_info["segmentation"]
        if isinstance(segmentation, list):
            # Polygon format: keep only the first polygon, matching the
            # original behavior.
            segmentation = segmentation[0]
        else:
            # Crowd annotations (iscrowd=1) store segmentation as an RLE
            # dict; indexing it with [0] used to raise KeyError. Emit an
            # empty polygon so parsing can proceed.
            segmentation = []

        anno_infos[instance_info["image_id"]].append(
            {
                "segmentation": segmentation,
                "bbox": instance_info["bbox"],
                "iscrowd": instance_info["iscrowd"],
                "classes": CATEGORY_ID2CLASS_NAMES[instance_info["category_id"]],
            }
        )

    for image in anno_instances["images"]:
        # Example image: {'license': 4, 'file_name': '000000397133.jpg',
        #   'height': 427, 'width': 640, 'id': 397133, ...}
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )

    return anno_infos, images_infos
309
+
310
+
311
def get_keypoints_annotation(keypoints_path):
    """Parse a COCO ``person_keypoints_<split>2017.json`` annotation file.

    Args:
        keypoints_path: path to the person-keypoints JSON file.

    Returns:
        ``(anno_infos, images_infos)`` where ``anno_infos`` maps an image id
        to a list of per-person dicts
        (``{"bbox", "classes", "keypoints", "num_keypoints"}``) and
        ``images_infos`` is a list of
        ``{"image_name", "height", "width", "image_id"}`` dicts, one per
        image entry in the file.
    """
    with open(keypoints_path, "r") as f:
        anno_keypoints = json.load(f)

    # defaultdict removes the duplicated "first object vs append" branches
    # (consistent with get_captions_annotation) and makes images without
    # annotations map to an empty list instead of raising KeyError.
    anno_infos = defaultdict(list)
    images_infos = list()

    for keypoint_info in anno_keypoints["annotations"]:
        # "keypoints" is the COCO flat list [x1, y1, v1, x2, y2, v2, ...]
        # where v is the visibility flag.
        anno_infos[keypoint_info["image_id"]].append(
            {
                "bbox": keypoint_info["bbox"],
                "classes": CATEGORY_ID2CLASS_NAMES[keypoint_info["category_id"]],
                "keypoints": keypoint_info["keypoints"],
                "num_keypoints": keypoint_info["num_keypoints"],
            }
        )

    for image in anno_keypoints["images"]:
        # Example image: {'license': 4, 'file_name': '000000397133.jpg',
        #   'height': 427, 'width': 640, 'id': 397133, ...}
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )

    return anno_infos, images_infos
353
+
354
+
355
class MINICOCOConfig(datasets.BuilderConfig):
    """BuilderConfig for the mini-COCO dataset.

    Args:
        data_name: dataset identifier; only ``"minicoco2017"`` is supported.
        task_name: one of ``"detection"``, ``"segmentation"``,
            ``"caption"``, ``"keypoint"``.
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.

    Raises:
        ValueError: if ``data_name`` or ``task_name`` is not supported.
    """

    _DATA_NAMES = ("minicoco2017",)
    _TASK_NAMES = ("detection", "segmentation", "caption", "keypoint")

    def __init__(self, data_name, task_name, **kwargs):
        super().__init__(**kwargs)
        # Validate with real exceptions rather than ``assert``: asserts are
        # stripped when Python runs with -O, which would silently accept
        # bad configurations.
        if data_name not in self._DATA_NAMES:
            raise ValueError("unsupported data_name: %r" % (data_name,))
        if task_name not in self._TASK_NAMES:
            raise ValueError("unsupported task_name: %r" % (task_name,))
        self.data_name = data_name
        self.task_name = task_name
371
+
372
+
373
class PASCALDataset(datasets.GeneratorBasedBuilder):
    """GeneratorBasedBuilder for the mini-COCO 2017 subset.

    Supports four task configurations (detection, segmentation, caption,
    keypoint) over the same extracted archive layout:
    ``<root>/annotations/*.json`` plus ``<root>/<split>2017/*.jpg``.

    NOTE(review): the class name says PASCAL but the data is COCO; kept
    unchanged because external code may refer to this class by name.
    """

    BUILDER_CONFIGS = [
        MINICOCOConfig(
            name="minicoco2017_detection",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 detection dataset",
            data_name="minicoco2017",
            task_name="detection",
        ),
        MINICOCOConfig(
            name="minicoco2017_segmentation",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 segmentation dataset",
            data_name="minicoco2017",
            task_name="segmentation",
        ),
        MINICOCOConfig(
            name="minicoco2017_caption",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 caption dataset",
            data_name="minicoco2017",
            task_name="caption",
        ),
        MINICOCOConfig(
            name="minicoco2017_keypoint",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 keypoint dataset",
            data_name="minicoco2017",
            task_name="keypoint",
        ),
    ]

    def _info(self):
        """Return dataset metadata with the task-specific feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_DATASET_FEATURES[self.config.task_name],
            # No default supervised_keys (inputs/targets depend on the task).
            supervised_keys=None,
            homepage="https://fuliucansheng.github.io/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare train/val/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS[self.config.data_name])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files, "split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files, "split": "test"},
            ),
        ]

    def _existing_images(self, filepath, split, images_infos):
        # Yield (id_, image_info, absolute_image_path) only for images that
        # actually exist on disk. Enumeration order (and therefore example
        # ids) matches the original per-task loops.
        for id_, image in enumerate(images_infos):
            image_path = os.path.join(filepath, split + "2017", image["image_name"])
            if not os.path.exists(image_path):
                continue
            yield id_, image, os.path.abspath(image_path)

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs in raw (text) form for the configured task."""
        logging.info("generating examples from = %s, split = %s", filepath, split)
        task_name = self.config.task_name

        if task_name == "caption":
            captions_path = os.path.join(filepath, "annotations", "captions_" + split + "2017.json")
            anno_infos, images_infos = get_captions_annotation(captions_path)

            for id_, image, image_path in self._existing_images(filepath, split, images_infos):
                yield id_, {
                    "id": id_,
                    "image": image_path,
                    "height": image["height"],
                    "width": image["width"],
                    # anno_infos is a defaultdict(list): images without
                    # captions get an empty list.
                    "captions": anno_infos[image["image_id"]],
                }
            return

        # Detection / segmentation / keypoint all share the bbox + class
        # core; they differ only in the annotation file and the per-object
        # extra fields, so they are handled by one loop.
        if task_name == "keypoint":
            anno_path = os.path.join(filepath, "annotations", "person_keypoints_" + split + "2017.json")
            anno_infos, images_infos = get_keypoints_annotation(anno_path)
        else:
            anno_path = os.path.join(filepath, "annotations", "instances_" + split + "2017.json")
            anno_infos, images_infos = get_instances_annotation(anno_path)

        for id_, image, image_path in self._existing_images(filepath, split, images_infos):
            objects = []
            # .get() with a default: images that exist on disk but have no
            # annotated objects produce an empty object list instead of
            # raising KeyError (the parsers may return a plain dict).
            for object_info in anno_infos.get(image["image_id"], []):
                entry = {
                    "bboxes": object_info["bbox"],
                    "classes": object_info["classes"],
                }
                if task_name == "segmentation":
                    entry["segmentation"] = object_info["segmentation"]
                    entry["iscrowd"] = object_info["iscrowd"]
                elif task_name == "keypoint":
                    entry["keypoints"] = object_info["keypoints"]
                    entry["num_keypoints"] = object_info["num_keypoints"]
                objects.append(entry)

            yield id_, {
                "id": id_,
                "image": image_path,
                "height": image["height"],
                "width": image["width"],
                "objects": objects,
            }
minicoco.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:347d6613f2564d33029f78fb52eebdaf4036013274b4cd9daa2aa52fb0e59fcf
3
+ size 6792618279