albertvillanova (HF staff) committed
Commit 5370727
1 Parent(s): fb1f36b

Move ans2label and id2feature loading to _generate_examples
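In short, _split_generators now only resolves the downloaded paths and forwards them through gen_kwargs, while _generate_examples opens ans2label and the feature TSVs itself, caching the results on the builder instance so that a later split generated by the same builder can reuse them. Below is a minimal, self-contained sketch of that lazy-load-and-cache pattern; the builder name, URLs, and field names are illustrative placeholders, not part of this commit:

import json

import datasets

# Placeholder URLs for illustration only; the real script defines its own _URLS.
_URLS = {
    "train": "https://example.com/train_questions.json",
    "ans2label": "https://example.com/ans2label.json",
}


class ToyLazyBuilder(datasets.GeneratorBasedBuilder):
    """Toy builder: paths travel through gen_kwargs, and heavy resources are
    loaded lazily inside _generate_examples and cached on the instance."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"question": datasets.Value("string"), "label": datasets.Value("int32")}
            )
        )

    def _split_generators(self, dl_manager):
        # Only resolve paths here; defer the expensive parsing to _generate_examples.
        dl_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_dir["train"], "ans2label_path": dl_dir["ans2label"]},
            ),
        ]

    def _generate_examples(self, filepath, ans2label_path):
        # Load the answer vocabulary once and cache it on self, so another split
        # generated by the same builder instance does not re-read the file.
        if not hasattr(self, "ans2label"):
            with open(ans2label_path, encoding="utf-8") as f:
                self.ans2label = json.load(f)
        with open(filepath, encoding="utf-8") as f:
            for id_, d in enumerate(json.load(f)):
                yield id_, {"question": d["question"], "label": self.ans2label[d["answer"]]}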

Files changed (1):
  gqa-lxmert.py  +56 -34
gqa-lxmert.py CHANGED
@@ -91,52 +91,49 @@ class GqaLxmert(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         dl_dir = dl_manager.download_and_extract(_URLS)
-        self.ans2label = json.load(open(dl_dir["ans2label"]))
-        self.trainval_id2features = self._load_features(os.path.join(dl_dir["trainval_feat"], TRAINVAL_FEAT_PATH))
-        self.test_id2features = self._load_features(os.path.join(dl_dir["test_feat"], TEST_FEAT_PATH))
-
+        trainval_features_path = os.path.join(dl_dir["trainval_feat"], TRAINVAL_FEAT_PATH)
+        test_features_path = os.path.join(dl_dir["test_feat"], TEST_FEAT_PATH)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": dl_dir["train"], "testset": False},
+                gen_kwargs={
+                    "filepath": dl_dir["train"],
+                    "ans2label_path": dl_dir["ans2label"],
+                    "features_path": trainval_features_path,
+                    "testset": False,
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dl_dir["valid"], "testset": False},
+                gen_kwargs={
+                    "filepath": dl_dir["valid"],
+                    "ans2label_path": dl_dir["ans2label"],
+                    "features_path": trainval_features_path,
+                    "testset": False,
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": dl_dir["testdev"], "testset": True},
+                gen_kwargs={
+                    "filepath": dl_dir["testdev"],
+                    "ans2label_path": dl_dir["ans2label"],
+                    "features_path": test_features_path,
+                    "testset": True,
+                },
             ),
         ]
 
-    def _load_features(self, filepath):
-        """Returns a dictionary mapping an image id to the corresponding image's objects features."""
-        id2features = {}
-        with open(filepath) as f:
-            reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
-            for i, item in enumerate(reader):
-                features = {}
-                img_h = int(item["img_h"])
-                img_w = int(item["img_w"])
-                num_boxes = int(item["num_boxes"])
-                features["features"] = np.frombuffer(base64.b64decode(item["features"]), dtype=np.float32).reshape(
-                    (num_boxes, -1)
-                )
-                boxes = np.frombuffer(base64.b64decode(item["boxes"]), dtype=np.float32).reshape((num_boxes, 4))
-                features["normalized_boxes"] = self._normalize_boxes(boxes, img_h, img_w)
-                id2features[item["img_id"]] = features
-        return id2features
-
-    def _normalize_boxes(self, boxes, img_h, img_w):
-        """ Normalizes the input boxes given the original image size."""
-        normalized_boxes = boxes.copy()
-        normalized_boxes[:, (0, 2)] /= img_w
-        normalized_boxes[:, (1, 3)] /= img_h
-        return normalized_boxes
-
-    def _generate_examples(self, filepath, testset=False):
+    def _generate_examples(self, filepath, ans2label_path, features_path, testset=False):
         """ Yields examples as (key, example) tuples."""
+        if not hasattr(self, "ans2label"):
+            with open(ans2label_path, encoding="utf-8") as f:
+                self.ans2label = json.load(f)
+        if testset:
+            self.test_id2features = self._load_features(features_path)
+        else:
+            if not hasattr(self, "trainval_id2features"):
+                self.trainval_id2features = self._load_features(features_path)
+
         with open(filepath, encoding="utf-8") as f:
             gqa = json.load(f)
         for id_, d in enumerate(gqa):
@@ -161,4 +158,29 @@ class GqaLxmert(datasets.GeneratorBasedBuilder):
                 "features": img_features["features"],
                 "normalized_boxes": img_features["normalized_boxes"],
                 "label": label,
-            }
+            }
+
+    def _load_features(self, filepath):
+        """Returns a dictionary mapping an image id to the corresponding image's objects features."""
+        id2features = {}
+        with open(filepath) as f:
+            reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
+            for i, item in enumerate(reader):
+                features = {}
+                img_h = int(item["img_h"])
+                img_w = int(item["img_w"])
+                num_boxes = int(item["num_boxes"])
+                features["features"] = np.frombuffer(base64.b64decode(item["features"]), dtype=np.float32).reshape(
+                    (num_boxes, -1)
+                )
+                boxes = np.frombuffer(base64.b64decode(item["boxes"]), dtype=np.float32).reshape((num_boxes, 4))
+                features["normalized_boxes"] = self._normalize_boxes(boxes, img_h, img_w)
+                id2features[item["img_id"]] = features
+        return id2features
+
+    def _normalize_boxes(self, boxes, img_h, img_w):
+        """ Normalizes the input boxes given the original image size."""
+        normalized_boxes = boxes.copy()
+        normalized_boxes[:, (0, 2)] /= img_w
+        normalized_boxes[:, (1, 3)] /= img_h
+        return normalized_boxes
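As a quick usage sketch (hedged: the script is loaded from a hypothetical local copy, since the hosting repository id is not part of this diff, and only fields visible in the diff above are accessed):

from datasets import load_dataset

# Hypothetical local path to the script shown above; recent `datasets` releases
# may additionally require trust_remote_code=True for script-based datasets.
gqa = load_dataset("./gqa-lxmert.py", split="validation")

item = gqa[0]
print(item["label"])                 # answer label, via the ans2label mapping
print(len(item["features"]))         # one feature vector per detected region
print(item["normalized_boxes"][0])   # a box normalized by image width/height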