nouamanetazi (HF staff) committed
Commit 0dc9587
1 Parent(s): bdd5a32
Files changed (1)
  1. test111.py +62 -57
test111.py CHANGED
@@ -207,91 +207,96 @@ class MASSIVE(datasets.GeneratorBasedBuilder):

    def _split_generators(self, dl_manager):

-        path = dl_manager.iter_archive(_URL)
+        # path = dl_manager.download_and_extract(_URL)
+        files = dl_manager.iter_archive(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
-                    "filepath": "train",
-                    "files": path,
+                    "files": files,
+                    "split": "train",
+                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
-                    "files": path,
-                    "filepath": "dev",
+                    "files": files,
+                    "split": "dev",
+                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
-                    "files": path,
-                    "filepath": "test",
+                    "files": files,
+                    "split": "test",
+                    "lang": self.config.name,
                },
            ),
        ]

-    def _generate_examples(self, filepath, split, lang):
+    def _generate_examples(self, files, split, lang):

-        filepath = filepath + "/1.0/data/" + lang + ".jsonl"
+        filepath = "/1.0/data/" + lang + ".jsonl"

        logger.info("⏳ Generating examples from = %s", filepath)

-        # Read the file
-        f = open(filepath, "r")
-        lines = f.read().split("\n")
-        f.close()
-
-        key_ = 0
-
-        for line in lines:
-
-            data = json.loads(line)
-
-            if data["partition"] != split:
-                continue
-
-            # Slot method
-            if "slot_method" in data:
-                slot_method = [
-                    {
-                        "slot": s["slot"],
-                        "method": s["method"],
-                    }
-                    for s in data["slot_method"]
-                ]
-            else:
-                slot_method = []
-
-            # Judgments
-            if "judgments" in data:
-                judgments = [
-                    {
-                        "worker_id": j["worker_id"],
-                        "intent_score": j["intent_score"],
-                        "slots_score": j["slots_score"],
-                        "grammar_score": j["grammar_score"],
-                        "spelling_score": j["spelling_score"],
-                        "language_identification": j["language_identification"],
-                    }
-                    for j in data["judgments"]
-                ]
-            else:
-                judgments = []
-
-            yield key_, {
-                "id": data["id"],
-                "locale": data["locale"],
-                "partition": data["partition"],
-                "scenario": data["scenario"],
-                "intent": data["intent"],
-                "utt": data["utt"],
-                "annot_utt": data["annot_utt"],
-                "worker_id": data["worker_id"],
-                "slot_method": slot_method,
-                "judgments": judgments,
-            }
-
-            key_ += 1
+        for path, f in files:
+            if path == filepath:
+                # Read the file
+                lines = f.read().split("\n")
+                f.close()
+
+                key_ = 0
+
+                for line in lines:
+
+                    data = json.loads(line)
+
+                    if data["partition"] != split:
+                        continue
+
+                    # Slot method
+                    if "slot_method" in data:
+                        slot_method = [
+                            {
+                                "slot": s["slot"],
+                                "method": s["method"],
+                            }
+                            for s in data["slot_method"]
+                        ]
+                    else:
+                        slot_method = []
+
+                    # Judgments
+                    if "judgments" in data:
+                        judgments = [
+                            {
+                                "worker_id": j["worker_id"],
+                                "intent_score": j["intent_score"],
+                                "slots_score": j["slots_score"],
+                                "grammar_score": j["grammar_score"],
+                                "spelling_score": j["spelling_score"],
+                                "language_identification": j["language_identification"],
+                            }
+                            for j in data["judgments"]
+                        ]
+                    else:
+                        judgments = []
+
+                    yield key_, {
+                        "id": data["id"],
+                        "locale": data["locale"],
+                        "partition": data["partition"],
+                        "scenario": data["scenario"],
+                        "intent": data["intent"],
+                        "utt": data["utt"],
+                        "annot_utt": data["annot_utt"],
+                        "worker_id": data["worker_id"],
+                        "slot_method": slot_method,
+                        "judgments": judgments,
+                    }
+
+                    key_ += 1
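
For readers unfamiliar with `dl_manager.iter_archive`, it streams the downloaded tarball and yields `(path_inside_archive, file_object)` pairs, which is why the new `_generate_examples` loops over `files` instead of opening a path on disk. Below is a minimal sketch that emulates that iteration with the standard `tarfile` module; the archive filename and member path are assumptions used for illustration, not values taken from this commit.

import json
import tarfile


def iter_archive_like(archive_path):
    """Yield (member_path, file_obj) pairs, roughly mimicking what
    DownloadManager.iter_archive hands to _generate_examples."""
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)


def read_split(files, wanted_path, split):
    """Find one .jsonl member in the archive stream and keep only the
    records whose "partition" matches the requested split."""
    for path, f in files:
        if path == wanted_path:
            for raw in f.read().decode("utf-8").splitlines():
                if raw.strip():
                    record = json.loads(raw)
                    if record["partition"] == split:
                        yield record


# Usage sketch (archive name and member path are placeholders):
# files = iter_archive_like("amazon-massive-dataset-1.0.tar.gz")
# for row in read_split(files, "1.0/data/en-US.jsonl", "train"):
#     print(row["utt"], row["intent"])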
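
Since `lang` now comes from `self.config.name`, each builder configuration selects one locale file inside the archive, and `datasets` calls `_generate_examples(**gen_kwargs)` once per split. A hypothetical loading example; the repository id and config name are placeholders, not taken from this commit.

from datasets import load_dataset

# "nouamanetazi/test111" and "en-US" are placeholder identifiers; substitute the
# actual repo id and one of the config names defined by the script's BUILDER_CONFIGS.
dataset = load_dataset("nouamanetazi/test111", "en-US")

print(dataset["train"][0]["utt"])        # raw utterance
print(dataset["train"][0]["annot_utt"])  # slot-annotated utterance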