sps committed on
Commit
302ddb6
1 Parent(s): ae9cd16

Refactor twostep data loading

Browse files
Files changed (1) hide show
  1. codequeries.py +38 -17
codequeries.py CHANGED
@@ -119,7 +119,7 @@ class Codequeries(datasets.GeneratorBasedBuilder):
119
  "subtokenized_input_sequence", "label_sequence"],
120
  citation=_CODEQUERIES_CITATION,
121
  data_url={
122
- "test": "twostep_relevance"
123
  },
124
  url="",
125
  ),
@@ -209,20 +209,41 @@ class Codequeries(datasets.GeneratorBasedBuilder):
209
  assert split == datasets.Split.TEST
210
  logger.info("generating examples from = %s", filepath)
211
 
212
- with open(filepath, encoding="utf-8") as f:
213
  key = 0
214
- for line in f:
215
- row = json.loads(line)
216
-
217
- instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
218
- yield instance_key, {
219
- "query_name": row["query_name"],
220
- "context_blocks": row["context_blocks"],
221
- "answer_spans": row["answer_spans"],
222
- "supporting_fact_spans": row["supporting_fact_spans"],
223
- "code_file_path": row["code_file_path"],
224
- "example_type": row["example_type"],
225
- "subtokenized_input_sequence": row["subtokenized_input_sequence"],
226
- "label_sequence": row["label_sequence"],
227
- }
228
- key += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  "subtokenized_input_sequence", "label_sequence"],
120
  citation=_CODEQUERIES_CITATION,
121
  data_url={
122
+ "test": "twostep_relevance/twostep_relevance_test_"
123
  },
124
  url="",
125
  ),
 
209
  assert split == datasets.Split.TEST
210
  logger.info("generating examples from = %s", filepath)
211
 
212
+ if self.config.name == "twostep":
213
  key = 0
214
+ for i in range(10):
215
+ with open(filepath + str(i) + '.json', encoding="utf-8") as f:
216
+ for line in f:
217
+ row = json.loads(line)
218
+
219
+ instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
220
+ yield instance_key, {
221
+ "query_name": row["query_name"],
222
+ "context_blocks": row["context_blocks"],
223
+ "answer_spans": row["answer_spans"],
224
+ "supporting_fact_spans": row["supporting_fact_spans"],
225
+ "code_file_path": row["code_file_path"],
226
+ "example_type": row["example_type"],
227
+ "subtokenized_input_sequence": row["subtokenized_input_sequence"],
228
+ "label_sequence": row["label_sequence"],
229
+ }
230
+ key += 1
231
+ else:
232
+ with open(filepath, encoding="utf-8") as f:
233
+ key = 0
234
+ for line in f:
235
+ row = json.loads(line)
236
+
237
+ instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
238
+ yield instance_key, {
239
+ "query_name": row["query_name"],
240
+ "context_blocks": row["context_blocks"],
241
+ "answer_spans": row["answer_spans"],
242
+ "supporting_fact_spans": row["supporting_fact_spans"],
243
+ "code_file_path": row["code_file_path"],
244
+ "example_type": row["example_type"],
245
+ "subtokenized_input_sequence": row["subtokenized_input_sequence"],
246
+ "label_sequence": row["label_sequence"],
247
+ }
248
+ key += 1
249
+