yonatanbitton committed on
Commit 4275770
1 Parent(s): 006d305

Create flickr30k.py

Files changed (1)
  1. flickr30k.py +75 -0
flickr30k.py ADDED
@@ -0,0 +1,75 @@
+ # coding=utf-8
+ # Copyright 2022 the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import pandas as pd
+ import datasets
+ import json
+ from huggingface_hub import hf_hub_url
+
+ _INPUT_CSV = "flickr_annotations_30k.csv"
+ _INPUT_IMAGES = "flickr30k-images"
+ _REPO_ID = "nlphuji/flickr30k"
+ # CSV columns stored as JSON-encoded strings; they are decoded in _generate_examples.
+ _JSON_KEYS = ['imgids', 'raw', 'sentids']
+
+ class Dataset(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.1.0")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="TEST", version=VERSION, description="test"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "caption": [datasets.Value("string")],
+                     "sentids": [datasets.Value("int64")],
+                     "imgid": [datasets.Value("int64")],
+                     "split": datasets.Value("string"),
+                     "filename": datasets.Value("string"),
+                     "tokens": [[datasets.Value("string")]],
+                     "sentid": [datasets.Value("int64")],
+                 }
+             ),
+             task_templates=[],
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         repo_id = _REPO_ID
+         # download_and_extract on a dict returns a dict with the same keys
+         # mapped to local paths (the images zip is extracted to a directory).
+         data_dir = dl_manager.download_and_extract({
+             "examples_csv": hf_hub_url(repo_id=repo_id, repo_type='dataset', filename=_INPUT_CSV),
+             "images_dir": hf_hub_url(repo_id=repo_id, repo_type='dataset', filename=f"{_INPUT_IMAGES}.zip"),
+         })
+
+         return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir)]
+
+     def _generate_examples(self, examples_csv, images_dir):
+         """Yields examples."""
+         df = pd.read_csv(examples_csv)
+         # Decode the columns that the CSV stores as JSON strings back into Python lists.
+         for c in _JSON_KEYS:
+             df[c] = df[c].apply(json.loads)
+
+         for r_idx, r in df.iterrows():
+             r_dict = r.to_dict()
+             # Point the "image" feature at the extracted image file for this row.
+             image_path = os.path.join(images_dir, _INPUT_IMAGES, r_dict['filename'])
+             r_dict['image'] = image_path
+             # The CSV keeps the captions under "raw"; expose them as "caption".
+             r_dict['caption'] = r_dict.pop('raw')
+             yield r_idx, r_dict
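
A minimal usage sketch (not part of the commit), assuming the script is published in the nlphuji/flickr30k dataset repo as _REPO_ID suggests; it loads the builder through the datasets library and reads one example from its single TEST split (recent versions of datasets may additionally require trust_remote_code=True):

import datasets

# Download the annotations CSV and the image zip via the loading script above,
# then build and return the TEST split.
dataset = datasets.load_dataset("nlphuji/flickr30k", split="test")

# Each example bundles an image with its reference captions and split metadata.
example = dataset[0]
print(example["filename"])
print(example["caption"])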