Update files from the datasets library (from 1.16.0)
Browse files
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
README.md
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
---
|
|
|
2 |
annotations_creators:
|
3 |
- expert-generated
|
4 |
language_creators:
|
|
|
1 |
---
|
2 |
+
pretty_name: KLUE
|
3 |
annotations_creators:
|
4 |
- expert-generated
|
5 |
language_creators:
|
klue.py
CHANGED
@@ -18,7 +18,6 @@
|
|
18 |
|
19 |
import csv
|
20 |
import json
|
21 |
-
import os
|
22 |
import textwrap
|
23 |
|
24 |
import datasets
|
@@ -374,148 +373,161 @@ class Klue(datasets.GeneratorBasedBuilder):
|
|
374 |
)
|
375 |
|
376 |
def _split_generators(self, dl_manager):
|
377 |
-
|
378 |
dir_name = self.config.data_url.split("/")[-1].replace(".tar.gz", "")
|
379 |
-
data_dir = os.path.join(dl_dir, dir_name)
|
380 |
return [
|
381 |
datasets.SplitGenerator(
|
382 |
name=datasets.Split.TRAIN,
|
383 |
gen_kwargs={
|
384 |
-
"data_file":
|
385 |
-
"
|
386 |
},
|
387 |
),
|
388 |
datasets.SplitGenerator(
|
389 |
name=datasets.Split.VALIDATION,
|
390 |
gen_kwargs={
|
391 |
-
"data_file":
|
392 |
-
"
|
393 |
},
|
394 |
),
|
395 |
]
|
396 |
|
397 |
-
def _generate_examples(self, data_file,
|
398 |
if self.config.name in ["ynat", "sts", "re"]:
|
399 |
-
|
400 |
-
|
401 |
-
|
402 |
-
|
403 |
-
|
|
|
|
|
404 |
|
405 |
if self.config.name == "nli":
|
406 |
-
|
407 |
-
|
408 |
-
|
409 |
-
|
410 |
-
|
411 |
-
|
412 |
-
|
413 |
-
|
414 |
-
|
415 |
-
|
416 |
-
|
417 |
-
|
|
|
|
|
418 |
|
419 |
if self.config.name == "ner":
|
420 |
-
|
421 |
-
|
422 |
-
|
423 |
-
|
424 |
-
|
425 |
-
|
426 |
-
|
427 |
-
|
428 |
-
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
|
436 |
-
|
|
|
|
|
|
|
437 |
|
438 |
if self.config.name == "dp":
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
|
450 |
-
|
451 |
-
|
452 |
-
|
453 |
-
|
454 |
-
|
455 |
-
|
456 |
-
|
457 |
-
|
458 |
-
|
459 |
-
|
460 |
-
|
461 |
-
|
462 |
-
|
463 |
-
|
464 |
-
|
465 |
-
|
466 |
-
|
467 |
-
|
468 |
-
|
469 |
-
|
470 |
-
|
471 |
-
|
472 |
-
|
|
|
|
|
|
|
473 |
|
474 |
if self.config.name == "mrc":
|
475 |
-
|
476 |
-
|
477 |
-
|
478 |
-
|
479 |
-
|
480 |
-
|
481 |
-
|
482 |
-
|
483 |
-
|
484 |
-
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
|
|
|
489 |
|
490 |
-
|
491 |
-
|
492 |
-
|
493 |
-
|
494 |
-
|
495 |
|
496 |
-
|
497 |
-
|
498 |
-
|
499 |
-
|
500 |
-
|
501 |
-
|
502 |
-
|
503 |
-
|
504 |
-
|
505 |
-
|
506 |
-
|
507 |
-
|
508 |
-
|
509 |
-
|
|
|
510 |
|
511 |
if self.config.name == "wos":
|
512 |
-
|
513 |
-
|
514 |
-
|
515 |
-
|
516 |
-
|
517 |
-
|
518 |
-
|
519 |
-
|
520 |
-
|
521 |
-
|
|
|
|
|
|
18 |
|
19 |
import csv
|
20 |
import json
|
|
|
21 |
import textwrap
|
22 |
|
23 |
import datasets
|
|
|
373 |
)
|
374 |
|
375 |
def _split_generators(self, dl_manager):
    """Download the config's tarball and build train/validation generators.

    The archive is streamed with ``iter_archive`` instead of being
    extracted to disk, so each split receives a fresh archive iterator
    plus the member path of its own data file.
    """
    archive = dl_manager.download(self.config.data_url)
    dir_name = self.config.data_url.split("/")[-1].replace(".tar.gz", "")
    split_specs = [
        (datasets.Split.TRAIN, "train"),
        (datasets.Split.VALIDATION, "dev"),
    ]
    return [
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={
                "data_file": dir_name + "/" + self.config.file_map[file_key],
                # Each split gets its own iterator over the same archive.
                "files": dl_manager.iter_archive(archive),
            },
        )
        for split_name, file_key in split_specs
    ]
|
394 |
|
395 |
+
def _generate_examples(self, data_file, files):
|
396 |
if self.config.name in ["ynat", "sts", "re"]:
|
397 |
+
for path, f in files:
|
398 |
+
if path == data_file:
|
399 |
+
f = json.load(f)
|
400 |
+
for id_, row in enumerate(f):
|
401 |
+
features = {key: row[key] for key in row if key in self.config.features}
|
402 |
+
yield id_, features
|
403 |
+
break
|
404 |
|
405 |
if self.config.name == "nli":
|
406 |
+
for path, f in files:
|
407 |
+
if path == data_file:
|
408 |
+
f = json.load(f)
|
409 |
+
for id_, row in enumerate(f):
|
410 |
+
# In train file, "source" is written as "genre"
|
411 |
+
features = {
|
412 |
+
"guid": row["guid"],
|
413 |
+
"source": row["source"] if "source" in row else row["genre"],
|
414 |
+
"premise": row["premise"],
|
415 |
+
"hypothesis": row["hypothesis"],
|
416 |
+
"label": row["gold_label"],
|
417 |
+
}
|
418 |
+
yield id_, features
|
419 |
+
break
|
420 |
|
421 |
if self.config.name == "ner":
|
422 |
+
for path, f in files:
|
423 |
+
if path == data_file:
|
424 |
+
f = (line.decode("utf-8") for line in f)
|
425 |
+
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
|
426 |
+
for _ in range(5): # skip headers
|
427 |
+
next(reader)
|
428 |
+
id_ = -1
|
429 |
+
for row in reader:
|
430 |
+
if row:
|
431 |
+
if row[0].startswith("##"):
|
432 |
+
id_ += 1
|
433 |
+
tokens, ner_tags = [], []
|
434 |
+
sentence = row[1]
|
435 |
+
else:
|
436 |
+
tokens.append(row[0])
|
437 |
+
ner_tags.append(row[1])
|
438 |
+
else: # new line
|
439 |
+
assert len(tokens) == len(ner_tags)
|
440 |
+
yield id_, {"sentence": sentence, "tokens": tokens, "ner_tags": ner_tags}
|
441 |
+
break
|
442 |
|
443 |
if self.config.name == "dp":
|
444 |
+
for path, f in files:
|
445 |
+
if path == data_file:
|
446 |
+
f = (line.decode("utf-8") for line in f)
|
447 |
+
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
|
448 |
+
for _ in range(5): # skip headers
|
449 |
+
next(reader)
|
450 |
+
id_ = -1
|
451 |
+
for row in reader:
|
452 |
+
if row:
|
453 |
+
if row[0].startswith("##"):
|
454 |
+
id_ += 1
|
455 |
+
index = []
|
456 |
+
word_form = []
|
457 |
+
lemma = []
|
458 |
+
pos = []
|
459 |
+
head = []
|
460 |
+
deprel = []
|
461 |
+
sentence = row[1]
|
462 |
+
else:
|
463 |
+
index.append(row[0])
|
464 |
+
word_form.append(row[1])
|
465 |
+
lemma.append(row[2])
|
466 |
+
pos.append(row[3])
|
467 |
+
head.append(row[4])
|
468 |
+
deprel.append(row[5])
|
469 |
+
else: # new line
|
470 |
+
assert len(index) == len(word_form) == len(lemma) == len(pos) == len(head) == len(deprel)
|
471 |
+
yield id_, {
|
472 |
+
"sentence": sentence,
|
473 |
+
"index": index,
|
474 |
+
"word_form": word_form,
|
475 |
+
"lemma": lemma,
|
476 |
+
"pos": pos,
|
477 |
+
"head": head,
|
478 |
+
"deprel": deprel,
|
479 |
+
}
|
480 |
+
break
|
481 |
|
482 |
if self.config.name == "mrc":
|
483 |
+
for path, f in files:
|
484 |
+
if path == data_file:
|
485 |
+
f = json.load(f)
|
486 |
+
id_ = -1
|
487 |
+
for example in f["data"]:
|
488 |
+
title = example.get("title", "")
|
489 |
+
news_category = example.get("news_category", "")
|
490 |
+
source = example["source"]
|
491 |
+
for paragraph in example["paragraphs"]:
|
492 |
+
context = paragraph["context"].strip()
|
493 |
+
for qa in paragraph["qas"]:
|
494 |
+
guid = qa["guid"]
|
495 |
+
question_type = qa["question_type"]
|
496 |
+
is_impossible = qa["is_impossible"]
|
497 |
+
question = qa["question"].strip()
|
498 |
|
499 |
+
if "plausible_answers" in qa:
|
500 |
+
qa["answers"].extend(qa["plausible_answers"])
|
501 |
+
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
|
502 |
+
answers = [answer["text"].strip() for answer in qa["answers"]]
|
503 |
+
id_ += 1
|
504 |
|
505 |
+
yield id_, {
|
506 |
+
"guid": guid,
|
507 |
+
"title": title,
|
508 |
+
"context": context,
|
509 |
+
"news_category": news_category,
|
510 |
+
"source": source,
|
511 |
+
"question_type": question_type,
|
512 |
+
"is_impossible": is_impossible,
|
513 |
+
"question": question,
|
514 |
+
"answers": {
|
515 |
+
"answer_start": answer_starts,
|
516 |
+
"text": answers,
|
517 |
+
},
|
518 |
+
}
|
519 |
+
break
|
520 |
|
521 |
if self.config.name == "wos":
|
522 |
+
for path, f in files:
|
523 |
+
if path == data_file:
|
524 |
+
f = json.load(f)
|
525 |
+
for id_, row in enumerate(f):
|
526 |
+
guid = row["guid"]
|
527 |
+
domains = row["domains"]
|
528 |
+
dialogue = row["dialogue"]
|
529 |
+
for utterance in dialogue:
|
530 |
+
if "state" not in utterance:
|
531 |
+
utterance["state"] = []
|
532 |
+
yield id_, {"guid": guid, "domains": domains, "dialogue": dialogue}
|
533 |
+
break
|