system HF staff committed on
Commit
571662b
1 Parent(s): 0b7c991

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
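The substance of this update is the streaming support that ships with datasets 1.16.0: klue.py no longer extracts the downloaded archive to disk via `download_and_extract`, but iterates over the tar members with `dl_manager.iter_archive`, which lets the dataset be read in streaming mode. A minimal usage sketch (assuming the Hub id `klue` and the `ynat` config that this script defines):

```python
from datasets import load_dataset

# With the iter_archive-based loader, examples can be streamed straight
# out of the tar.gz archive instead of extracting it locally first.
ds = load_dataset("klue", "ynat", split="train", streaming=True)
print(next(iter(ds)))
```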

Files changed (2)
  1. README.md +1 -0
  2. klue.py +131 -119
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: KLUE
 annotations_creators:
 - expert-generated
 language_creators:
klue.py CHANGED
@@ -18,7 +18,6 @@
 
 import csv
 import json
-import os
 import textwrap
 
 import datasets
@@ -374,148 +373,161 @@ class Klue(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(self.config.data_url)
+        archive = dl_manager.download(self.config.data_url)
         dir_name = self.config.data_url.split("/")[-1].replace(".tar.gz", "")
-        data_dir = os.path.join(dl_dir, dir_name)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "data_file": os.path.join(data_dir, self.config.file_map["train"]),
-                    "split": "train",
+                    "data_file": dir_name + "/" + self.config.file_map["train"],
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "data_file": os.path.join(data_dir, self.config.file_map["dev"]),
-                    "split": "dev",
+                    "data_file": dir_name + "/" + self.config.file_map["dev"],
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, data_file, split):
+    def _generate_examples(self, data_file, files):
         if self.config.name in ["ynat", "sts", "re"]:
-            with open(data_file, encoding="UTF-8") as f:
-                f = json.load(f)
-                for id_, row in enumerate(f):
-                    features = {key: row[key] for key in row if key in self.config.features}
-                    yield id_, features
+            for path, f in files:
+                if path == data_file:
+                    f = json.load(f)
+                    for id_, row in enumerate(f):
+                        features = {key: row[key] for key in row if key in self.config.features}
+                        yield id_, features
+                    break
 
         if self.config.name == "nli":
-            with open(data_file, encoding="UTF-8") as f:
-                f = json.load(f)
-                for id_, row in enumerate(f):
-                    # In train file, "source" is written as "genre"
-                    features = {
-                        "guid": row["guid"],
-                        "source": row["source"] if "source" in row else row["genre"],
-                        "premise": row["premise"],
-                        "hypothesis": row["hypothesis"],
-                        "label": row["gold_label"],
-                    }
-                    yield id_, features
+            for path, f in files:
+                if path == data_file:
+                    f = json.load(f)
+                    for id_, row in enumerate(f):
+                        # In train file, "source" is written as "genre"
+                        features = {
+                            "guid": row["guid"],
+                            "source": row["source"] if "source" in row else row["genre"],
+                            "premise": row["premise"],
+                            "hypothesis": row["hypothesis"],
+                            "label": row["gold_label"],
+                        }
+                        yield id_, features
+                    break
 
         if self.config.name == "ner":
-            with open(data_file, encoding="UTF-8") as f:
-                reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-                for _ in range(5):  # skip headers
-                    next(reader)
-                id_ = -1
-                for row in reader:
-                    if row:
-                        if row[0].startswith("##"):
-                            id_ += 1
-                            tokens, ner_tags = [], []
-                            sentence = row[1]
-                        else:
-                            tokens.append(row[0])
-                            ner_tags.append(row[1])
-                    else:  # new line
-                        assert len(tokens) == len(ner_tags)
-                        yield id_, {"sentence": sentence, "tokens": tokens, "ner_tags": ner_tags}
+            for path, f in files:
+                if path == data_file:
+                    f = (line.decode("utf-8") for line in f)
+                    reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                    for _ in range(5):  # skip headers
+                        next(reader)
+                    id_ = -1
+                    for row in reader:
+                        if row:
+                            if row[0].startswith("##"):
+                                id_ += 1
+                                tokens, ner_tags = [], []
+                                sentence = row[1]
+                            else:
+                                tokens.append(row[0])
+                                ner_tags.append(row[1])
+                        else:  # new line
+                            assert len(tokens) == len(ner_tags)
+                            yield id_, {"sentence": sentence, "tokens": tokens, "ner_tags": ner_tags}
+                    break
 
         if self.config.name == "dp":
-            with open(data_file, encoding="UTF-8") as f:
-                reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-                for _ in range(5):  # skip headers
-                    next(reader)
-                id_ = -1
-                for row in reader:
-                    if row:
-                        if row[0].startswith("##"):
-                            id_ += 1
-                            index = []
-                            word_form = []
-                            lemma = []
-                            pos = []
-                            head = []
-                            deprel = []
-                            sentence = row[1]
-                        else:
-                            index.append(row[0])
-                            word_form.append(row[1])
-                            lemma.append(row[2])
-                            pos.append(row[3])
-                            head.append(row[4])
-                            deprel.append(row[5])
-                    else:  # new line
-                        assert len(index) == len(word_form) == len(lemma) == len(pos) == len(head) == len(deprel)
-                        yield id_, {
-                            "sentence": sentence,
-                            "index": index,
-                            "word_form": word_form,
-                            "lemma": lemma,
-                            "pos": pos,
-                            "head": head,
-                            "deprel": deprel,
-                        }
+            for path, f in files:
+                if path == data_file:
+                    f = (line.decode("utf-8") for line in f)
+                    reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                    for _ in range(5):  # skip headers
+                        next(reader)
+                    id_ = -1
+                    for row in reader:
+                        if row:
+                            if row[0].startswith("##"):
+                                id_ += 1
+                                index = []
+                                word_form = []
+                                lemma = []
+                                pos = []
+                                head = []
+                                deprel = []
+                                sentence = row[1]
+                            else:
+                                index.append(row[0])
+                                word_form.append(row[1])
+                                lemma.append(row[2])
+                                pos.append(row[3])
+                                head.append(row[4])
+                                deprel.append(row[5])
+                        else:  # new line
+                            assert len(index) == len(word_form) == len(lemma) == len(pos) == len(head) == len(deprel)
+                            yield id_, {
+                                "sentence": sentence,
+                                "index": index,
+                                "word_form": word_form,
+                                "lemma": lemma,
+                                "pos": pos,
+                                "head": head,
+                                "deprel": deprel,
+                            }
+                    break
 
         if self.config.name == "mrc":
-            with open(data_file, encoding="UTF-8") as f:
-                f = json.load(f)
-                id_ = -1
-                for example in f["data"]:
-                    title = example.get("title", "")
-                    news_category = example.get("news_category", "")
-                    source = example["source"]
-                    for paragraph in example["paragraphs"]:
-                        context = paragraph["context"].strip()
-                        for qa in paragraph["qas"]:
-                            guid = qa["guid"]
-                            question_type = qa["question_type"]
-                            is_impossible = qa["is_impossible"]
-                            question = qa["question"].strip()
+            for path, f in files:
+                if path == data_file:
+                    f = json.load(f)
+                    id_ = -1
+                    for example in f["data"]:
+                        title = example.get("title", "")
+                        news_category = example.get("news_category", "")
+                        source = example["source"]
+                        for paragraph in example["paragraphs"]:
+                            context = paragraph["context"].strip()
+                            for qa in paragraph["qas"]:
+                                guid = qa["guid"]
+                                question_type = qa["question_type"]
+                                is_impossible = qa["is_impossible"]
+                                question = qa["question"].strip()
 
-                            if "plausible_answers" in qa:
-                                qa["answers"].extend(qa["plausible_answers"])
-                            answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                            answers = [answer["text"].strip() for answer in qa["answers"]]
-                            id_ += 1
+                                if "plausible_answers" in qa:
+                                    qa["answers"].extend(qa["plausible_answers"])
+                                answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                                answers = [answer["text"].strip() for answer in qa["answers"]]
+                                id_ += 1
 
-                            yield id_, {
-                                "guid": guid,
-                                "title": title,
-                                "context": context,
-                                "news_category": news_category,
-                                "source": source,
-                                "question_type": question_type,
-                                "is_impossible": is_impossible,
-                                "question": question,
-                                "answers": {
-                                    "answer_start": answer_starts,
-                                    "text": answers,
-                                },
-                            }
+                                yield id_, {
+                                    "guid": guid,
+                                    "title": title,
+                                    "context": context,
+                                    "news_category": news_category,
+                                    "source": source,
+                                    "question_type": question_type,
+                                    "is_impossible": is_impossible,
+                                    "question": question,
+                                    "answers": {
+                                        "answer_start": answer_starts,
+                                        "text": answers,
+                                    },
+                                }
+                    break
 
         if self.config.name == "wos":
-            with open(data_file, encoding="UTF-8") as f:
-                f = json.load(f)
-                for id_, row in enumerate(f):
-                    guid = row["guid"]
-                    domains = row["domains"]
-                    dialogue = row["dialogue"]
-                    for utterance in dialogue:
-                        if "state" not in utterance:
-                            utterance["state"] = []
-                    yield id_, {"guid": guid, "domains": domains, "dialogue": dialogue}
+            for path, f in files:
+                if path == data_file:
+                    f = json.load(f)
+                    for id_, row in enumerate(f):
+                        guid = row["guid"]
+                        domains = row["domains"]
+                        dialogue = row["dialogue"]
+                        for utterance in dialogue:
+                            if "state" not in utterance:
+                                utterance["state"] = []
+                        yield id_, {"guid": guid, "domains": domains, "dialogue": dialogue}
+                    break
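For context on the new `_generate_examples` signature: `dl_manager.iter_archive` yields `(path, file_object)` pairs with the file opened in binary mode, which is why the TSV-based configs (`ner`, `dp`) now wrap `f` in a `line.decode("utf-8")` generator before handing it to `csv.reader`, which expects text. A rough standard-library sketch of that iteration pattern, assuming a local copy of one of the task archives (the file names here are hypothetical):

```python
import csv
import tarfile

def iter_archive(archive_path):
    # Approximates dl_manager.iter_archive: yield (member_path, binary_file)
    # for each regular file in the tar, in archive order.
    with tarfile.open(archive_path, "r:gz") as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

for path, f in iter_archive("klue-ner-v1.1.tar.gz"):  # hypothetical local archive
    if path.endswith("_train.tsv"):
        lines = (line.decode("utf-8") for line in f)  # csv.reader needs str, not bytes
        for row in csv.reader(lines, delimiter="\t", quoting=csv.QUOTE_NONE):
            print(row)
        break
```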