system HF staff committed on
Commit
3d1d9b6
1 Parent(s): 76b3800

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. flue.py +39 -31
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  annotations_creators:
3
  - crowdsourced
4
  - machine-generated
1
  ---
2
+ pretty_name: FLUE
3
  annotations_creators:
4
  - crowdsourced
5
  - machine-generated
flue.py CHANGED
@@ -283,47 +283,52 @@ class Flue(datasets.GeneratorBasedBuilder):
283
 
284
  def _split_generators(self, dl_manager):
285
  if self.config.name == "CLS":
286
- data_folder = dl_manager.download_and_extract(self.config.data_url)
287
 
288
  return [
289
  datasets.SplitGenerator(
290
  name=datasets.Split.TRAIN,
291
  gen_kwargs={
292
- "data_file": os.path.join(data_folder, "cls-acl10-unprocessed", "fr"),
293
  "split": "train",
 
294
  },
295
  ),
296
  datasets.SplitGenerator(
297
  name=datasets.Split.TEST,
298
  gen_kwargs={
299
- "data_file": os.path.join(data_folder, "cls-acl10-unprocessed", "fr"),
300
  "split": "test",
 
301
  },
302
  ),
303
  ]
304
  elif self.config.name == "PAWS-X":
305
- data_folder = dl_manager.download_and_extract(self.config.data_url)
306
 
307
  return [
308
  datasets.SplitGenerator(
309
  name=datasets.Split.VALIDATION,
310
  gen_kwargs={
311
- "data_file": os.path.join(data_folder, "x-final", "fr", "dev_2k.tsv"),
312
  "split": "",
 
313
  },
314
  ),
315
  datasets.SplitGenerator(
316
  name=datasets.Split.TEST,
317
  gen_kwargs={
318
- "data_file": os.path.join(data_folder, "x-final", "fr", "test_2k.tsv"),
319
  "split": "",
 
320
  },
321
  ),
322
  datasets.SplitGenerator(
323
  name=datasets.Split.TRAIN,
324
  gen_kwargs={
325
- "data_file": os.path.join(data_folder, "x-final", "fr", "translated_train.tsv"),
326
  "split": "",
 
327
  },
328
  ),
329
  ]
@@ -355,6 +360,7 @@ class Flue(datasets.GeneratorBasedBuilder):
355
  ),
356
  ]
357
  elif self.config.name == "WSD-V":
 
358
  data_folder = dl_manager.download_and_extract(self.config.data_url)
359
  self._wsdv_prepare_data(os.path.join(data_folder, self.config.data_dir))
360
 
@@ -375,33 +381,35 @@ class Flue(datasets.GeneratorBasedBuilder):
375
  ),
376
  ]
377
 
378
- def _generate_examples(self, data_file, split):
379
  if self.config.name == "CLS":
380
- for category in ["books", "dvd", "music"]:
381
- file_path = os.path.join(data_file, category, split + ".review")
382
- with open(file_path, "rt", encoding="utf-8") as f:
383
- next(f)
 
 
 
 
 
 
 
 
 
 
 
 
 
384
  id = 0
385
- text = f.read()
386
- for id_, line in enumerate(text.split("\n\n")):
387
- if len(line) > 9:
388
  id += 1
389
- review_text, label = self._cls_extractor(line)
390
- yield f"{category}_{id_}", {"idx": id, "text": review_text, "label": label}
391
- elif self.config.name == "PAWS-X":
392
- with open(data_file, encoding="utf-8") as f:
393
- data = csv.reader(f, delimiter="\t")
394
- next(data) # skip header
395
- id = 0
396
- for id_, row in enumerate(data):
397
- if len(row) == 4:
398
- id += 1
399
- yield id_, {
400
- "idx": id,
401
- "sentence1": self._cleaner(row[1]),
402
- "sentence2": self._cleaner(row[2]),
403
- "label": int(row[3].strip()),
404
- }
405
  elif self.config.name == "XNLI":
406
  with open(data_file, encoding="utf-8") as f:
407
  data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
283
 
284
  def _split_generators(self, dl_manager):
285
  if self.config.name == "CLS":
286
+ archive = dl_manager.download(self.config.data_url)
287
 
288
  return [
289
  datasets.SplitGenerator(
290
  name=datasets.Split.TRAIN,
291
  gen_kwargs={
292
+ "data_file": ("cls-acl10-unprocessed", "fr"),
293
  "split": "train",
294
+ "files": dl_manager.iter_archive(archive),
295
  },
296
  ),
297
  datasets.SplitGenerator(
298
  name=datasets.Split.TEST,
299
  gen_kwargs={
300
+ "data_file": ("cls-acl10-unprocessed", "fr"),
301
  "split": "test",
302
+ "files": dl_manager.iter_archive(archive),
303
  },
304
  ),
305
  ]
306
  elif self.config.name == "PAWS-X":
307
+ archive = dl_manager.download(self.config.data_url)
308
 
309
  return [
310
  datasets.SplitGenerator(
311
  name=datasets.Split.VALIDATION,
312
  gen_kwargs={
313
+ "data_file": ("x-final", "fr", "dev_2k.tsv"),
314
  "split": "",
315
+ "files": dl_manager.iter_archive(archive),
316
  },
317
  ),
318
  datasets.SplitGenerator(
319
  name=datasets.Split.TEST,
320
  gen_kwargs={
321
+ "data_file": ("x-final", "fr", "test_2k.tsv"),
322
  "split": "",
323
+ "files": dl_manager.iter_archive(archive),
324
  },
325
  ),
326
  datasets.SplitGenerator(
327
  name=datasets.Split.TRAIN,
328
  gen_kwargs={
329
+ "data_file": ("x-final", "fr", "translated_train.tsv"),
330
  "split": "",
331
+ "files": dl_manager.iter_archive(archive),
332
  },
333
  ),
334
  ]
360
  ),
361
  ]
362
  elif self.config.name == "WSD-V":
363
+ # TODO(QL): make streamable using iter_archive
364
  data_folder = dl_manager.download_and_extract(self.config.data_url)
365
  self._wsdv_prepare_data(os.path.join(data_folder, self.config.data_dir))
366
 
381
  ),
382
  ]
383
 
384
+ def _generate_examples(self, data_file, split, files=None):
385
  if self.config.name == "CLS":
386
+ for path, f in files:
387
+ for category in ["books", "dvd", "music"]:
388
+ file_path = "/".join(data_file + (category, split + ".review"))
389
+ if path == file_path:
390
+ next(f)
391
+ id = 0
392
+ text = f.read().decode("utf-8")
393
+ for id_, line in enumerate(text.split("\n\n")):
394
+ if len(line) > 9:
395
+ id += 1
396
+ review_text, label = self._cls_extractor(line)
397
+ yield f"{category}_{id_}", {"idx": id, "text": review_text, "label": label}
398
+ elif self.config.name == "PAWS-X":
399
+ for path, f in files:
400
+ if path == "/".join(data_file):
401
+ data = csv.reader((line.decode("utf-8") for line in f), delimiter="\t")
402
+ next(data) # skip header
403
  id = 0
404
+ for id_, row in enumerate(data):
405
+ if len(row) == 4:
 
406
  id += 1
407
+ yield id_, {
408
+ "idx": id,
409
+ "sentence1": self._cleaner(row[1]),
410
+ "sentence2": self._cleaner(row[2]),
411
+ "label": int(row[3].strip()),
412
+ }
 
 
 
 
 
 
 
 
 
 
413
  elif self.config.name == "XNLI":
414
  with open(data_file, encoding="utf-8") as f:
415
  data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)