VictorSanh (HF staff) committed
Commit 94c1e74
Parent: ba9716e

loading script v0

Files changed (1)
  1. NoCaps.py +129 -0
NoCaps.py ADDED
@@ -0,0 +1,129 @@
+# Copyright 2022 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""NoCaps loading script."""
+
+
+import json
+
+from collections import defaultdict
+import datasets
+
+_CITATION = """\
+@inproceedings{agrawal2019nocaps,
+  title={nocaps: novel object captioning at scale},
+  author={Agrawal, Harsh and Desai, Karan and Wang, Yufei and Chen, Xinlei and Jain, Rishabh and Johnson, Mark and Batra, Dhruv and Parikh, Devi and Lee, Stefan and Anderson, Peter},
+  booktitle={Proceedings of the IEEE International Conference on Computer Vision},
+  pages={8948--8957},
+  year={2019}
+}
+"""
+
+_DESCRIPTION = """\
+Dubbed NoCaps, for novel object captioning at scale, NoCaps consists of 166,100 human-generated captions describing 15,100 images from the Open Images validation and test sets.
+The associated training data consists of COCO image-caption pairs, plus Open Images image-level labels and object bounding boxes.
+Since Open Images contains many more classes than COCO, nearly 400 object classes seen in test images have no or very few associated training captions (hence, nocaps).
+"""
+
+_HOMEPAGE = "https://nocaps.org/"
+
+_LICENSE = "CC BY 2.0"
+
+_URLS = {
+    "validation": "https://nocaps.s3.amazonaws.com/nocaps_val_4500_captions.json",
+    "test": "https://s3.amazonaws.com/nocaps/nocaps_test_image_info.json",
+}
+
+
+class NoCaps(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "image": datasets.Image(),
+                "image_coco_url": datasets.Value("string"),
+                "image_date_captured": datasets.Value("string"),
+                "image_file_name": datasets.Value("string"),
+                "image_height": datasets.Value("int32"),
+                "image_width": datasets.Value("int32"),
+                "image_id": datasets.Value("int32"),
+                "image_license": datasets.Value("int8"),
+                "image_open_images_id": datasets.Value("string"),
+                "annotations_ids": datasets.Sequence(datasets.Value("int32")),
+                "annotations_captions": datasets.Sequence(datasets.Value("string")),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_file = dl_manager.download_and_extract(_URLS)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "data_file": data_file["validation"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "data_file": data_file["test"],
+                },
+            ),
+        ]
+
+    def _generate_examples(self, data_file):
+        with open(data_file, encoding="utf-8") as f:
+            data = json.load(f)
+
+        annotations = defaultdict(list)
+        if "annotations" in data:
+            # Only present for the validation split
+            for ann in data["annotations"]:
+                image_id = ann["image_id"]
+                caption_id = ann["id"]
+                caption = ann["caption"]
+                annotations[image_id].append((caption_id, caption))
+
+        counter = 0
+        for im in data["images"]:
+            image_coco_url = im["coco_url"]
+            image_date_captured = im["date_captured"]
+            image_file_name = im["file_name"]
+            image_height = im["height"]
+            image_width = im["width"]
+            image_id = im["id"]
+            image_license = im["license"]
+            image_open_images_id = im["open_images_id"]
+            yield counter, {
+                "image": image_coco_url,
+                "image_coco_url": image_coco_url,
+                "image_date_captured": image_date_captured,
+                "image_file_name": image_file_name,
+                "image_height": image_height,
+                "image_width": image_width,
+                "image_id": image_id,
+                "image_license": image_license,
+                "image_open_images_id": image_open_images_id,
+                "annotations_ids": [ann[0] for ann in annotations[image_id]],
+                "annotations_captions": [ann[1] for ann in annotations[image_id]],
+            }
+            counter += 1
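
For reference, a minimal usage sketch of how a loading script like this one can be exercised with the `datasets` library. It assumes NoCaps.py sits in the current working directory; none of it is part of the commit itself.

    from datasets import load_dataset

    # Load the validation split directly from the local loading script;
    # a Hub repository id can be passed in the same way once the script is pushed.
    ds = load_dataset("./NoCaps.py", split="validation")

    print(ds.features)
    # Captions are only shipped in the validation JSON; for the test split,
    # "annotations_ids" and "annotations_captions" come back as empty lists.
    print(ds["annotations_captions"][0])

Passing the local script path is convenient while iterating on a v0 of the script before it is pushed to the Hub.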