Datasets:

Languages:
English
ArXiv:
License:
yonatanbitton committed on
Commit
3c4e300
1 Parent(s): 1ed5543

Update vasr.py

Browse files
Files changed (1) hide show
  1. vasr.py +13 -10
vasr.py CHANGED
@@ -13,10 +13,9 @@
13
  # limitations under the License.
14
  """ VASR Loading Script """
15
 
16
-
17
  import json
18
  import os
19
- import pandas as pd
20
  import datasets
21
  from huggingface_hub import hf_hub_url
22
 
@@ -60,7 +59,7 @@ class Vasr(datasets.GeneratorBasedBuilder):
60
  "A_img": [datasets.Value("string")],
61
  "B_img": [datasets.Value("string")],
62
  "C_img": [datasets.Value("string")],
63
- "candidates": [datasets.Value("string")],
64
  "label": datasets.Value("int64"),
65
  "D_img": [datasets.Value("string")],
66
  "A_verb": [datasets.Value("string")],
@@ -98,13 +97,16 @@ class Vasr(datasets.GeneratorBasedBuilder):
98
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
99
  data_dir = dl_manager.download_and_extract({
100
  "images_dir": hf_hub_url("datasets/nlphuji/vasr", filename="vasr_images.zip")
101
- })
102
-
103
  # return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir)]
104
  return [
105
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], gen_kwargs=data_dir}),
106
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], gen_kwargs=data_dir}),
107
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"], gen_kwargs=data_dir}),
 
 
 
108
  ]
109
 
110
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
@@ -116,10 +118,11 @@ class Vasr(datasets.GeneratorBasedBuilder):
116
  # columns_to_serialize = ['candidates', 'associations']
117
  # for c in columns_to_serialize:
118
  # df[c] = df[c].apply(json.loads)
119
-
120
  for r_idx, r in df.iterrows():
121
  r_dict = r.to_dict()
122
  r_dict['candidates'] = json.loads(r_dict['candidates'])
123
- candidates_images = [os.path.join(images_dir, "vasr_images", f"{x}.{self.IMAGE_EXTENSION}") for x in r_dict['candidates']]
 
124
  r_dict['candidates_images'] = candidates_images
125
  yield r_idx, r_dict
 
13
  # limitations under the License.
14
  """ VASR Loading Script """
15
 
 
16
  import json
17
  import os
18
+ import pandas as pd
19
  import datasets
20
  from huggingface_hub import hf_hub_url
21
 
 
59
  "A_img": [datasets.Value("string")],
60
  "B_img": [datasets.Value("string")],
61
  "C_img": [datasets.Value("string")],
62
+ "candidates": [datasets.Value("string")],
63
  "label": datasets.Value("int64"),
64
  "D_img": [datasets.Value("string")],
65
  "A_verb": [datasets.Value("string")],
 
97
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
98
  data_dir = dl_manager.download_and_extract({
99
  "images_dir": hf_hub_url("datasets/nlphuji/vasr", filename="vasr_images.zip")
100
+ })
101
+
102
  # return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir)]
103
  return [
104
+ datasets.SplitGenerator(name=datasets.Split.TEST,
105
+ gen_kwargs={**data_dir, **{'filepath': downloaded_files["test"]}}),
106
+ datasets.SplitGenerator(name=datasets.Split.TRAIN,
107
+ gen_kwargs={**data_dir, **{'filepath': downloaded_files["train"]}}),
108
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION,
109
+ gen_kwargs={**data_dir, **{'filepath': downloaded_files["dev"]}}),
110
  ]
111
 
112
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
 
118
  # columns_to_serialize = ['candidates', 'associations']
119
  # for c in columns_to_serialize:
120
  # df[c] = df[c].apply(json.loads)
121
+
122
  for r_idx, r in df.iterrows():
123
  r_dict = r.to_dict()
124
  r_dict['candidates'] = json.loads(r_dict['candidates'])
125
+ candidates_images = [os.path.join(images_dir, "vasr_images", f"{x}.{self.IMAGE_EXTENSION}") for x in
126
+ r_dict['candidates']]
127
  r_dict['candidates_images'] = candidates_images
128
  yield r_idx, r_dict