yiqun committed
Commit 373acca
1 Parent(s): 76e5a60

add loading script

Files changed (2):
  1. refer.py +345 -0
  2. referit.py +203 -0
refer.py ADDED
@@ -0,0 +1,345 @@
__author__ = 'licheng'
"""
Modified to make it work with Python 3+.

This interface provides access to four datasets:
1) refclef
2) refcoco
3) refcoco+
4) refcocog
split by unc and google
The following API functions are defined:
REFER - REFER api class
getRefIds - get ref ids that satisfy given filter conditions.
getAnnIds - get ann ids that satisfy given filter conditions.
getImgIds - get image ids that satisfy given filter conditions.
getCatIds - get category ids that satisfy given filter conditions.
loadRefs - load refs with the specified ref ids.
loadAnns - load anns with the specified ann ids.
loadImgs - load images with the specified image ids.
loadCats - load category names with the specified category ids.
getRefBox - get ref's bounding box [x, y, w, h] given the ref_id
showRef - show image, segmentation or box of the referred object with the ref
getMask - get mask and area of the referred object given ref
showMask - show mask of the referred object given ref
"""

import itertools
import json
import os.path as osp
import pickle
import sys
import time
from pprint import pprint

import matplotlib.pyplot as plt
import numpy as np
import skimage.io as io
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon, Rectangle
from pycocotools import mask


class REFER:
    def __init__(self, data_root, dataset='refcoco', splitBy='unc'):
        # data_root is the folder that contains refclef, refcoco, refcoco+
        # and refcocog; also provide the dataset name and splitBy information,
        # e.g., dataset = 'refcoco', splitBy = 'unc'
        print('loading dataset %s into memory...' % dataset)
        self.ROOT_DIR = osp.abspath(osp.dirname(__file__))
        self.DATA_DIR = osp.join(data_root, dataset)
        if dataset in ['refcoco', 'refcoco+', 'refcocog']:
            self.IMAGE_DIR = osp.join(data_root, 'images/train2014')
        elif dataset == 'refclef':
            self.IMAGE_DIR = osp.join(data_root, 'images/saiapr_tc-12')
        else:
            print('No refer dataset is called [%s]' % dataset)
            sys.exit()

        # load refs from data/{dataset}/refs({splitBy}).p
        tic = time.time()
        ref_file = osp.join(self.DATA_DIR, 'refs(' + splitBy + ').p')
        self.data = {}
        self.data['dataset'] = dataset

        self.data['refs'] = pickle.load(open(ref_file, 'rb'), fix_imports=True)

        # load annotations from data/{dataset}/instances.json
        instances_file = osp.join(self.DATA_DIR, 'instances.json')
        instances = json.load(open(instances_file, 'r'))
        self.data['images'] = instances['images']
        self.data['annotations'] = instances['annotations']
        self.data['categories'] = instances['categories']

        # create index
        self.createIndex()
        print('DONE (t=%.2fs)' % (time.time() - tic))

    def createIndex(self):
        # create sets of mapping
        # 1) Refs: {ref_id: ref}
        # 2) Anns: {ann_id: ann}
        # 3) Imgs: {image_id: image}
        # 4) Cats: {category_id: category_name}
        # 5) Sents: {sent_id: sent}
        # 6) imgToRefs: {image_id: refs}
        # 7) imgToAnns: {image_id: anns}
        # 8) refToAnn: {ref_id: ann}
        # 9) annToRef: {ann_id: ref}
        # 10) catToRefs: {category_id: refs}
        # 11) sentToRef: {sent_id: ref}
        # 12) sentToTokens: {sent_id: tokens}
        print('creating index...')
        # fetch info from instances
        Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}
        for ann in self.data['annotations']:
            Anns[ann['id']] = ann
            imgToAnns[ann['image_id']] = imgToAnns.get(ann['image_id'],
                                                       []) + [ann]
        for img in self.data['images']:
            Imgs[img['id']] = img
        for cat in self.data['categories']:
            Cats[cat['id']] = cat['name']

        # fetch info from refs
        Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}
        Sents, sentToRef, sentToTokens = {}, {}, {}
        for ref in self.data['refs']:
            # ids
            ref_id = ref['ref_id']
            ann_id = ref['ann_id']
            category_id = ref['category_id']
            image_id = ref['image_id']

            # add mapping related to ref
            Refs[ref_id] = ref
            imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]
            catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]
            refToAnn[ref_id] = Anns[ann_id]
            annToRef[ann_id] = ref

            # add mapping of sent
            for sent in ref['sentences']:
                Sents[sent['sent_id']] = sent
                sentToRef[sent['sent_id']] = ref
                sentToTokens[sent['sent_id']] = sent['tokens']

        # create class members
        self.Refs = Refs
        self.Anns = Anns
        self.Imgs = Imgs
        self.Cats = Cats
        self.Sents = Sents
        self.imgToRefs = imgToRefs
        self.imgToAnns = imgToAnns
        self.refToAnn = refToAnn
        self.annToRef = annToRef
        self.catToRefs = catToRefs
        self.sentToRef = sentToRef
        self.sentToTokens = sentToTokens
        print('index created.')

    def getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=''):
        image_ids = image_ids if isinstance(image_ids, list) else [image_ids]
        cat_ids = cat_ids if isinstance(cat_ids, list) else [cat_ids]
        ref_ids = ref_ids if isinstance(ref_ids, list) else [ref_ids]

        if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:
            refs = self.data['refs']
        else:
            if not len(image_ids) == 0:
                refs = [self.imgToRefs[image_id] for image_id in image_ids]
                refs = list(itertools.chain.from_iterable(refs))
            else:
                refs = self.data['refs']
            if not len(cat_ids) == 0:
                refs = [ref for ref in refs if ref['category_id'] in cat_ids]
            if not len(ref_ids) == 0:
                refs = [ref for ref in refs if ref['ref_id'] in ref_ids]
            if not len(split) == 0:
                if split in ['testA', 'testB', 'testC']:
                    refs = [ref for ref in refs if split[-1] in ref['split']
                            ]  # we also consider testAB, testBC, ...
                elif split in ['testAB', 'testBC', 'testAC']:
                    refs = [ref for ref in refs
                            if ref['split'] == split]  # rarely used I guess...
                elif split == 'test':
                    refs = [ref for ref in refs if 'test' in ref['split']]
                elif split == 'train' or split == 'val':
                    refs = [ref for ref in refs if ref['split'] == split]
                else:
                    print('No such split [%s]' % split)
                    sys.exit()
        ref_ids = [ref['ref_id'] for ref in refs]
        return ref_ids

    def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):
        image_ids = image_ids if isinstance(image_ids, list) else [image_ids]
        cat_ids = cat_ids if isinstance(cat_ids, list) else [cat_ids]
        ref_ids = ref_ids if isinstance(ref_ids, list) else [ref_ids]

        if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:
            ann_ids = [ann['id'] for ann in self.data['annotations']]
        else:
            if not len(image_ids) == 0:
                lists = [
                    self.imgToAnns[image_id] for image_id in image_ids
                    if image_id in self.imgToAnns
                ]  # list of [anns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.data['annotations']
            if not len(cat_ids) == 0:
                anns = [ann for ann in anns if ann['category_id'] in cat_ids]
            ann_ids = [ann['id'] for ann in anns]
            if not len(ref_ids) == 0:
                ids = set(ann_ids).intersection(  # noqa
                    set([self.Refs[ref_id]['ann_id'] for ref_id in ref_ids]))
        return ann_ids

    def getImgIds(self, ref_ids=[]):
        ref_ids = ref_ids if isinstance(ref_ids, list) else [ref_ids]

        if not len(ref_ids) == 0:
            image_ids = list(
                set([self.Refs[ref_id]['image_id'] for ref_id in ref_ids]))
        else:
            image_ids = self.Imgs.keys()
        return image_ids

    def getCatIds(self):
        return self.Cats.keys()

    def loadRefs(self, ref_ids=[]):
        if isinstance(ref_ids, list):
            return [self.Refs[ref_id] for ref_id in ref_ids]
        elif isinstance(ref_ids, int):
            return [self.Refs[ref_ids]]

    def loadAnns(self, ann_ids=[]):
        if isinstance(ann_ids, list):
            return [self.Anns[ann_id] for ann_id in ann_ids]
        elif isinstance(ann_ids, int) or isinstance(ann_ids, str):
            return [self.Anns[ann_ids]]

    def loadImgs(self, image_ids=[]):
        if isinstance(image_ids, list):
            return [self.Imgs[image_id] for image_id in image_ids]
        elif isinstance(image_ids, int):
            return [self.Imgs[image_ids]]

    def loadCats(self, cat_ids=[]):
        if isinstance(cat_ids, list):
            return [self.Cats[cat_id] for cat_id in cat_ids]
        elif isinstance(cat_ids, int):
            return [self.Cats[cat_ids]]

    def getRefBox(self, ref_id):
        ref = self.Refs[ref_id]  # noqa
        ann = self.refToAnn[ref_id]
        return ann['bbox']  # [x, y, w, h]

    def showRef(self, ref, seg_box='seg'):
        ax = plt.gca()
        # show image
        image = self.Imgs[ref['image_id']]
        _img = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))
        ax.imshow(_img)
        # show refer expression
        for sid, sent in enumerate(ref['sentences']):
            print('%s. %s' % (sid + 1, sent['sent']))
        # show segmentations
        if seg_box == 'seg':
            ann_id = ref['ann_id']
            ann = self.Anns[ann_id]
            polygons = []
            color = []
            c = 'none'
            if isinstance(ann['segmentation'][0], list):
                # polygon used for refcoco*
                for seg in ann['segmentation']:
                    # integer division so reshape gets an int (Python 3)
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, True, alpha=0.4))
                    color.append(c)
                p = PatchCollection(polygons,
                                    facecolors=color,
                                    edgecolors=(1, 1, 0, 0),
                                    linewidths=3,
                                    alpha=1)
                ax.add_collection(p)  # thick yellow polygon
                p = PatchCollection(polygons,
                                    facecolors=color,
                                    edgecolors=(1, 0, 0, 0),
                                    linewidths=1,
                                    alpha=1)
                ax.add_collection(p)  # thin red polygon
            else:
                # mask used for refclef
                rle = ann['segmentation']
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                color_mask = np.array([2.0, 166.0, 101.0]) / 255
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
        # show bounding-box
        elif seg_box == 'box':
            ann_id = ref['ann_id']
            ann = self.Anns[ann_id]
            bbox = self.getRefBox(ref['ref_id'])
            box_plot = Rectangle((bbox[0], bbox[1]),
                                 bbox[2],
                                 bbox[3],
                                 fill=False,
                                 edgecolor='green',
                                 linewidth=3)
            ax.add_patch(box_plot)

    def getMask(self, ref):
        # return mask, area and mask-center
        ann = self.refToAnn[ref['ref_id']]
        image = self.Imgs[ref['image_id']]
        if isinstance(ann['segmentation'][0], list):  # polygon
            rle = mask.frPyObjects(ann['segmentation'], image['height'],
                                   image['width'])
        else:
            rle = ann['segmentation']

        # for i in range(len(rle['counts'])):
        #     print(rle)
        m = mask.decode(rle)
        # sometimes there are multiple binary maps (multiple segs)
        m = np.sum(m, axis=2)
        m = m.astype(np.uint8)  # convert to np.uint8
        # compute area
        area = sum(mask.area(rle))  # should be close to ann['area']
        return {'mask': m, 'area': area}

    def showMask(self, ref):
        M = self.getMask(ref)
        msk = M['mask']
        ax = plt.gca()
        ax.imshow(msk)


if __name__ == '__main__':
    # 'data' is a placeholder data_root; point it at the folder that holds
    # the refcocog annotations and the images/ directory.
    refer = REFER(data_root='data', dataset='refcocog', splitBy='google')
    ref_ids = refer.getRefIds()
    print(len(ref_ids))

    print(len(refer.Imgs))
    print(len(refer.imgToRefs))

    ref_ids = refer.getRefIds(split='train')
    print('There are %s training referred objects.' % len(ref_ids))

    for ref_id in ref_ids:
        ref = refer.loadRefs(ref_id)[0]
        if len(ref['sentences']) < 2:
            continue

        pprint(ref)
        print('The label is %s.' % refer.Cats[ref['category_id']])
        plt.figure()
        refer.showRef(ref, seg_box='box')
        plt.show()
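
For orientation, a minimal usage sketch of the API above; the data_root layout here is an assumption (annotations under ./data/refcoco, COCO images under ./data/images/train2014), so adjust the paths to your setup:

from refer import REFER

# 'data' is a placeholder data_root
refer = REFER(data_root='data', dataset='refcoco', splitBy='unc')
ref_id = refer.getRefIds(split='val')[0]
ref = refer.loadRefs(ref_id)[0]
print(refer.getRefBox(ref_id))  # [x, y, w, h]
m = refer.getMask(ref)          # {'mask': HxW uint8 array, 'area': ...}
print(m['mask'].shape, m['area'])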
referit.py ADDED
@@ -0,0 +1,203 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset
# script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""


import os.path as osp

import datasets
from .refer import REFER


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This RefCOCO dataset is designed to load refcoco, refcoco+, and refcocog.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points
# to the original files.
# This can be an arbitrary nested dict/list of URLs
# (see below in `_split_generators` method)
_URLS = {}


VALID_SPLIT_NAMES = ("train", "val", "testA", "testB")


class ReferitBuilderConfig(datasets.BuilderConfig):

    def __init__(self, name: str, split_by: str, **kwargs):
        super().__init__(name, **kwargs)
        self.split_by = split_by


# TODO: Name of the dataset usually matches the script name with CamelCase
# instead of snake_case
class ReferitDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("0.0.1")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes,
    # inheriting from datasets.BuilderConfig.
    BUILDER_CONFIG_CLASS = ReferitBuilderConfig

    # You will be able to load one or the other configurations
    # in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        # refcoco
        ReferitBuilderConfig(
            name="refcoco", split_by="unc",
            version=VERSION, description="refcoco."),
        # refcoco+
        ReferitBuilderConfig(
            name="refcoco+", split_by="unc",
            version=VERSION, description="refcoco+"),
        # refcocog
        ReferitBuilderConfig(
            name="refcocog", split_by="umd",
            version=VERSION, description="refcocog"),
    ]

    # It's not mandatory to have a default configuration.
    # Just use one if it makes sense.
    DEFAULT_CONFIG_NAME = "refcoco"

    def _info(self):
        self.config: ReferitBuilderConfig
        features = datasets.Features(
            {
                "ref_id": datasets.Value("int32"),
                "img_id": datasets.Value("int32"),
                "ann_id": datasets.Value("int32"),
                "file_name": datasets.Value("string"),
                "image_path": datasets.Value("string"),
                "height": datasets.Value("int32"),
                "width": datasets.Value("int32"),
                "coco_url": datasets.Value("string"),
                "sentences": [datasets.Value("string")],
                "segmentation": [[[datasets.Value("float")]]],
                "bbox": [[datasets.Value("float")]],
                "area": datasets.Value("float"),
                "iscrowd": datasets.Value("int32"),
                "category_id": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features,
            # uncomment supervised_keys line below and specify them.
            # They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and
        # defining the splits depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used
        # to download and extract URLS. It can accept any type
        # or nested list/dict and will give back the same structure with
        # the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached
        # folder where they are extracted is returned instead of the archive
        # urls = _URLS[self.config.name]
        # data_dir = dl_manager.download_and_extract(urls)
        splits = []
        for split in ("train", "val", "test", "testA", "testB"):
            splits.append(datasets.SplitGenerator(
                name=datasets.NamedSplit(split),
                gen_kwargs={
                    "split": split,
                },
            ))
        return splits

    # method parameters are unpacked from `gen_kwargs` as given in
    # `_split_generators`
    def _generate_examples(self, split: str):
        # TODO: This method handles input defined in _split_generators to
        # yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important
        # in itself, but must be unique for each example.
        refer = REFER(data_root=self.config.data_dir,
                      dataset=self.config.name,
                      splitBy=self.config.split_by)
        ref_ids = refer.getRefIds(split=split)
        for ref_id in ref_ids:
            ref = refer.loadRefs(ref_id)[0]
            ann_id = ref['ann_id']
            ann = refer.loadAnns(ann_id)[0]
            img_id = ann['image_id']
            img = refer.loadImgs(img_id)[0]
            file_name = img['file_name']
            image_path = osp.join(
                self.config.data_dir, "images", "train2014", file_name)
            descriptions = [r['raw'] for r in ref['sentences']]
            yield ref_id, {
                "ref_id": ref_id,
                "img_id": img_id,
                "ann_id": ann_id,
                "file_name": file_name,
                "image_path": image_path,
                "height": img['height'],
                "width": img['width'],
                "coco_url": img['coco_url'],
                "sentences": descriptions,
                "segmentation": [ann['segmentation']],
                "bbox": [ann['bbox']],
                "area": ann['area'],
                "iscrowd": ann['iscrowd'],
                "category_id": ann['category_id'],
            }
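
For a quick local sanity check, the loading script could be exercised along these lines (a minimal sketch; the script path and the data_dir layout are assumptions, since the script does not download anything and data_dir must already contain refcoco/, refcoco+/, refcocog/ plus images/train2014/):

from datasets import load_dataset

# 'data' is a placeholder data_dir; it is forwarded to REFER as data_root.
ds = load_dataset("referit.py", "refcoco", data_dir="data", split="train")
print(ds[0]["sentences"], ds[0]["bbox"])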