Jinchen Ge committed on
Commit
62c0a9b
1 Parent(s): 2bb73b8

Add test set

Files changed (1)
  1. vqa-lxmert.py +46 -23
vqa-lxmert.py CHANGED
@@ -47,11 +47,14 @@ _URLS = {
     "train_feat": "https://nlp.cs.unc.edu/data/lxmert_data/mscoco_imgfeat/train2014_obj36.zip",
     "valid": "https://nlp.cs.unc.edu/data/lxmert_data/vqa/valid.json",
     "valid_feat": "https://nlp.cs.unc.edu/data/lxmert_data/mscoco_imgfeat/val2014_obj36.zip",
+    "test": "https://nlp.cs.unc.edu/data/lxmert_data/vqa/test.json",
+    "test_feat": "https://nlp.cs.unc.edu/data/lxmert_data/mscoco_imgfeat/test2015_obj36.zip",
     "ans2label": "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_ans2label.json",
 }
 
-_TRAIN_IMG_PATH = "train2014_obj36.tsv"
-_VALID_IMG_PATH = "mscoco_imgfeat/val2014_obj36.tsv"
+_TRAIN_FEAT_PATH = "train2014_obj36.tsv"
+_VALID_FEAT_PATH = "mscoco_imgfeat/val2014_obj36.tsv"
+_TEST_FEAT_PATH = "mscoco_imgfeat/test2015_obj36.tsv"
 
 FIELDNAMES = [
     "img_id", "img_h", "img_w", "objects_id", "objects_conf", "attrs_id", "attrs_conf", "num_boxes", "boxes", "features"
@@ -102,11 +105,15 @@ class VqaV2Lxmert(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": dl_dir["train"], "imgfeat": os.path.join(dl_dir["train_feat"], _TRAIN_IMG_PATH)},
+                gen_kwargs={"filepath": dl_dir["train"], "imgfeat": os.path.join(dl_dir["train_feat"], _TRAIN_FEAT_PATH)},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dl_dir["valid"], "imgfeat": os.path.join(dl_dir["valid_feat"], _VALID_IMG_PATH)},
+                gen_kwargs={"filepath": dl_dir["valid"], "imgfeat": os.path.join(dl_dir["valid_feat"], _VALID_FEAT_PATH)},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": dl_dir["test"], "imgfeat": os.path.join(dl_dir["test_feat"], _TEST_FEAT_PATH), "labeled": False},
             ),
         ]
 
@@ -135,26 +142,42 @@ class VqaV2Lxmert(datasets.GeneratorBasedBuilder):
         normalized_boxes[:, (1, 3)] /= img_h
         return normalized_boxes
 
-    def _generate_examples(self, filepath, imgfeat):
+    def _generate_examples(self, filepath, imgfeat, labeled=True):
         """ Yields examples as (key, example) tuples."""
         id2features = self._load_features(imgfeat)
         with open(filepath, encoding="utf-8") as f:
             vqa = json.load(f)
-        for id_, d in enumerate(vqa):
-            img_features = id2features[d["img_id"]]
-            ids = [self.ans2label[x] for x in d["label"].keys()]
-            weights = list(d["label"].values())
-            yield id_, {
-                "question": d["sent"],
-                "question_type": d["question_type"],
-                "question_id": d["question_id"],
-                "image_id": d["img_id"],
-                "features": img_features["features"],
-                "normalized_boxes": img_features["normalized_boxes"],
-                "answer_type": d["answer_type"],
-                "label": {
-                    "ids": ids,
-                    "weights": weights,
-                },
-            }
-
+        if labeled:
+            for id_, d in enumerate(vqa):
+                img_features = id2features[d["img_id"]]
+                ids = [self.ans2label[x] for x in d["label"].keys()]
+                weights = list(d["label"].values())
+                yield id_, {
+                    "question": d["sent"],
+                    "question_type": d["question_type"],
+                    "question_id": d["question_id"],
+                    "image_id": d["img_id"],
+                    "features": img_features["features"],
+                    "normalized_boxes": img_features["normalized_boxes"],
+                    "answer_type": d["answer_type"],
+                    "label": {
+                        "ids": ids,
+                        "weights": weights,
+                    },
+                }
+        else:
+            for id_, d in enumerate(vqa):
+                img_features = id2features[d["img_id"]]
+                yield id_, {
+                    "question": d["sent"],
+                    "question_type": "",
+                    "question_id": d["question_id"],
+                    "image_id": d["img_id"],
+                    "features": img_features["features"],
+                    "normalized_boxes": img_features["normalized_boxes"],
+                    "answer_type": "",
+                    "label": {
+                        "ids": [],
+                        "weights": [],
+                    },
+                }
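
Usage note (not part of the commit): a minimal sketch of how the new test split could be consumed, assuming this file is used as a local loading script for the Hugging Face datasets library. The local path and the printed fields below are illustrative, not taken from the repository.

from datasets import load_dataset

# Minimal sketch, assuming vqa-lxmert.py is a local `datasets` loading script;
# the "./vqa-lxmert.py" path is illustrative. Downloading the LXMERT feature
# archives referenced in _URLS requires substantial disk space and bandwidth.
dataset = load_dataset("./vqa-lxmert.py")

train = dataset["train"]   # labeled split: "label" carries answer ids and soft weights
test = dataset["test"]     # new split, generated with labeled=False

example = test[0]
print(example["question"], example["question_id"])
print(example["label"])    # expected: {"ids": [], "weights": []} for test examples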