ydshieh committed
Commit 7dd6183
1 Parent(s): b920f1e
Files changed (2):
  1. coco_dataset.py +0 -215
  2. dummy_coco_dataset.py +15 -6
coco_dataset.py DELETED
@@ -1,215 +0,0 @@
-import json
-import os
-import datasets
-
-
-class COCOBuilderConfig(datasets.BuilderConfig):
-
-    def __init__(self, name, splits, **kwargs):
-        super().__init__(name, **kwargs)
-        self.splits = splits
-
-
-# Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@article{DBLP:journals/corr/LinMBHPRDZ14,
-  author    = {Tsung{-}Yi Lin and
-               Michael Maire and
-               Serge J. Belongie and
-               Lubomir D. Bourdev and
-               Ross B. Girshick and
-               James Hays and
-               Pietro Perona and
-               Deva Ramanan and
-               Piotr Doll{\'{a}}r and
-               C. Lawrence Zitnick},
-  title     = {Microsoft {COCO:} Common Objects in Context},
-  journal   = {CoRR},
-  volume    = {abs/1405.0312},
-  year      = {2014},
-  url       = {http://arxiv.org/abs/1405.0312},
-  archivePrefix = {arXiv},
-  eprint    = {1405.0312},
-  timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
-  biburl    = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
-  bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-"""
-
-# Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-COCO is a large-scale object detection, segmentation, and captioning dataset.
-"""
-
-# Add a link to an official homepage for the dataset here
-_HOMEPAGE = "http://cocodataset.org/#home"
-
-# Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-# Add link to the official dataset URLs here
-# The HuggingFace datasets library doesn't host the datasets but only points to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-
-# This script is supposed to work with a local (downloaded) COCO dataset.
-_URLs = {}
-
-
-# The name of the dataset usually matches the script name with CamelCase instead of snake_case
-class COCODataset(datasets.GeneratorBasedBuilder):
-    """An example dataset script to work with a local (downloaded) COCO dataset"""
-
-    VERSION = datasets.Version("0.0.0")
-
-    BUILDER_CONFIG_CLASS = COCOBuilderConfig
-    BUILDER_CONFIGS = [
-        COCOBuilderConfig(name='2017', splits=['train', 'valid', 'test']),
-    ]
-    DEFAULT_CONFIG_NAME = "2017"
-
-    def _info(self):
-        # This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-
-        feature_dict = {
-            "image_id": datasets.Value("int64"),
-            "caption_id": datasets.Value("int64"),
-            "caption": datasets.Value("string"),
-            "height": datasets.Value("int64"),
-            "width": datasets.Value("int64"),
-            "file_name": datasets.Value("string"),
-            "coco_url": datasets.Value("string"),
-            "image_path": datasets.Value("string"),
-        }
-
-        features = datasets.Features(feature_dict)
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types (defined above)
-            features=features,
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        data_dir = self.config.data_dir
-        if not data_dir:
-            raise ValueError(
-                "This script is supposed to work with a local (downloaded) COCO dataset. The argument `data_dir` in `load_dataset()` is required."
-            )
-
-        _DL_URLS = {
-            "train": os.path.join(data_dir, "train2017.zip"),
-            "val": os.path.join(data_dir, "val2017.zip"),
-            "test": os.path.join(data_dir, "test2017.zip"),
-            "annotations_trainval": os.path.join(data_dir, "annotations_trainval2017.zip"),
-            "image_info_test": os.path.join(data_dir, "image_info_test2017.zip"),
-        }
-        archive_path = dl_manager.download_and_extract(_DL_URLS)
-
-        splits = []
-        for split in self.config.splits:
-            if split == 'train':
-                dataset = datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "json_path": os.path.join(archive_path["annotations_trainval"], "annotations", "captions_train2017.json"),
-                        "image_dir": os.path.join(archive_path["train"], "train2017"),
-                        "split": "train",
-                    }
-                )
-            elif split in ['val', 'valid', 'validation', 'dev']:
-                dataset = datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "json_path": os.path.join(archive_path["annotations_trainval"], "annotations", "captions_val2017.json"),
-                        "image_dir": os.path.join(archive_path["val"], "val2017"),
-                        "split": "valid",
-                    },
-                )
-            elif split == 'test':
-                dataset = datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "json_path": os.path.join(archive_path["image_info_test"], "annotations", "image_info_test2017.json"),
-                        "image_dir": os.path.join(archive_path["test"], "test2017"),
-                        "split": "test",
-                    },
-                )
-            else:
-                continue
-
-            splits.append(dataset)
-
-        return splits
-
-    def _generate_examples(
-        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-        self, json_path, image_dir, split
-    ):
-        """Yields examples as (key, example) tuples."""
-        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is here for legacy reasons (tfds) and is not important in itself.
-
-        _features = ["image_id", "caption_id", "caption", "height", "width", "file_name", "coco_url", "image_path", "id"]
-        features = list(_features)
-
-        if split == "valid":
-            split = "val"
-
-        with open(json_path, 'r', encoding='UTF-8') as fp:
-            data = json.load(fp)
-
-        # list of dicts
-        images = data["images"]
-        entries = images
-
-        # build a dict of image_id -> image info dict
-        d = {image["id"]: image for image in images}
-
-        # list of dicts
-        if split in ["train", "val"]:
-            annotations = data["annotations"]
-
-            # merge the image info into each caption annotation
-            for annotation in annotations:
-                _id = annotation["id"]
-                image_info = d[annotation["image_id"]]
-                annotation.update(image_info)
-                annotation["id"] = _id
-
-            entries = annotations
-
-        for id_, entry in enumerate(entries):
-
-            entry = {k: v for k, v in entry.items() if k in features}
-
-            if split == "test":
-                # the test split has no captions: keep the image id and use placeholder values
-                entry["image_id"] = entry["id"]
-                entry["id"] = -1
-                entry["caption"] = -1
-
-            entry["caption_id"] = entry.pop("id")
-            entry["image_path"] = os.path.join(image_dir, entry["file_name"])
-
-            entry = {k: entry[k] for k in _features if k in entry}
-
-            yield str((entry["image_id"], entry["caption_id"])), entry
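For context, a minimal sketch of how this (now deleted) script would have been loaded. The paths here are assumptions, not part of the commit; `load_dataset` forwards extra keyword arguments such as `splits` to `COCOBuilderConfig.__init__`:

from datasets import load_dataset

# Assumed local directory holding the five official COCO 2017 zip archives
# (train2017.zip, val2017.zip, test2017.zip,
#  annotations_trainval2017.zip, image_info_test2017.zip).
COCO_DIR = "/path/to/coco"  # hypothetical path

ds = load_dataset(
    "path/to/coco_dataset.py",  # local copy of the script above
    "2017",
    data_dir=COCO_DIR,
    splits=["train", "valid"],  # forwarded to COCOBuilderConfig
)
# Each example carries the caption plus the resolved local image path.
print(ds["train"][0]["caption"], ds["train"][0]["image_path"])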
dummy_coco_dataset.py CHANGED
@@ -113,6 +113,15 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                 "This script is supposed to work with a local (downloaded) COCO dataset. The argument `data_dir` in `load_dataset()` is required."
             )
 
+        _DL_URLS = {
+            "train": os.path.join(data_dir, "train2017.zip"),
+            "val": os.path.join(data_dir, "val2017.zip"),
+            "test": os.path.join(data_dir, "test2017.zip"),
+            "annotations_trainval": os.path.join(data_dir, "annotations_trainval2017.zip"),
+            "image_info_test": os.path.join(data_dir, "image_info_test2017.zip"),
+        }
+        archive_path = dl_manager.download_and_extract(_DL_URLS)
+
         splits = []
         for split in self.config.splits:
             if split == 'train':
@@ -120,8 +129,8 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.TRAIN,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "json_path": os.path.join(data_dir, f"captions_train{self.config.name}.json"),
-                        "image_dir": os.path.join(data_dir, f'train{self.config.name}'),
+                        "json_path": os.path.join(archive_path["annotations_trainval"], "annotations", "captions_train2017.json"),
+                        "image_dir": os.path.join(archive_path["train"], "train2017"),
                         "split": "train",
                     }
                 )
@@ -130,8 +139,8 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "json_path": os.path.join(data_dir, f"captions_val{self.config.name}.json"),
-                        "image_dir": os.path.join(data_dir, f'val{self.config.name}'),
+                        "json_path": os.path.join(archive_path["annotations_trainval"], "annotations", "captions_val2017.json"),
+                        "image_dir": os.path.join(archive_path["val"], "val2017"),
                         "split": "valid",
                     },
                 )
@@ -140,8 +149,8 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.TEST,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "json_path": os.path.join(data_dir, f'image_info_test{self.config.name}.json'),
-                        "image_dir": os.path.join(data_dir, f'test{self.config.name}'),
+                        "json_path": os.path.join(archive_path["image_info_test"], "annotations", "image_info_test2017.json"),
+                        "image_dir": os.path.join(archive_path["test"], "test2017"),
                         "split": "test",
                     },
                 )
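With this change, `dummy_coco_dataset.py` no longer reads the annotation JSON files directly from `data_dir`: it now expects the unextracted zip archives there, and `dl_manager.download_and_extract` unpacks them into the datasets cache before the JSON files are read. A small sanity-check sketch, with the directory path as an assumption:

import os
from datasets import load_dataset

data_dir = "/path/to/coco"  # hypothetical directory holding the zips
expected = [
    "train2017.zip",
    "val2017.zip",
    "test2017.zip",
    "annotations_trainval2017.zip",
    "image_info_test2017.zip",
]
missing = [name for name in expected if not os.path.isfile(os.path.join(data_dir, name))]
if missing:
    raise FileNotFoundError(f"missing COCO archives in {data_dir}: {missing}")

# "valid" is mapped to datasets.Split.VALIDATION by the script
ds = load_dataset("path/to/dummy_coco_dataset.py", "2017", data_dir=data_dir, splits=["valid"])
print(ds["validation"])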