albertvillanova HF staff committed on
Commit
53bd002
1 Parent(s): fff9c79

Support streaming xtreme dataset for udpos config (#4131)

Browse files

* Support streaming xtreme dataset for udpos config

* Fix style

Commit from https://github.com/huggingface/datasets/commit/f87f79a864775d8f9e779073d8bace6c66230f60

Files changed (1) hide show
  1. xtreme.py +83 -135
xtreme.py CHANGED
@@ -2,7 +2,6 @@
2
 
3
 
4
  import csv
5
- import glob
6
  import json
7
  import os
8
  import textwrap
@@ -477,34 +476,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
477
  features["gold_label"] = datasets.Value("string")
478
 
479
  if self.config.name.startswith("udpos"):
480
- features = datasets.Features(
481
- {
482
- "tokens": datasets.Sequence(datasets.Value("string")),
483
- "pos_tags": datasets.Sequence(
484
- datasets.features.ClassLabel(
485
- names=[
486
- "ADJ",
487
- "ADP",
488
- "ADV",
489
- "AUX",
490
- "CCONJ",
491
- "DET",
492
- "INTJ",
493
- "NOUN",
494
- "NUM",
495
- "PART",
496
- "PRON",
497
- "PROPN",
498
- "PUNCT",
499
- "SCONJ",
500
- "SYM",
501
- "VERB",
502
- "X",
503
- ]
504
- )
505
- ),
506
- }
507
- )
508
 
509
  if self.config.name.startswith("PAN-X"):
510
  features = datasets.Features(
@@ -676,95 +648,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
676
  ),
677
  ]
678
  if self.config.name.startswith("udpos"):
679
- udpos_downloaded_files = dl_manager.download_and_extract(self.config.data_url)
680
- data_dir = os.path.join(udpos_downloaded_files, "ud-treebanks-v2.5")
681
-
682
- lang = self.config.name.split(".")[1]
683
- data_dir = os.path.join(data_dir, "*_" + lang + "*")
684
- folders = sorted(glob.glob(data_dir))
685
-
686
- if lang == "Kazakh":
687
- return [
688
- datasets.SplitGenerator(
689
- name=datasets.Split.TEST,
690
- # These kwargs will be passed to _generate_examples
691
- gen_kwargs={
692
- "filepath": [
693
- os.path.join(folder, file)
694
- for folder in folders
695
- for file in sorted(os.listdir(folder))
696
- if "test" in file and file.endswith(".conllu")
697
- ]
698
- },
699
- ),
700
- datasets.SplitGenerator(
701
- name=datasets.Split.TRAIN,
702
- # These kwargs will be passed to _generate_examples
703
- gen_kwargs={
704
- "filepath": [
705
- os.path.join(folder, file)
706
- for folder in folders
707
- for file in sorted(os.listdir(folder))
708
- if "train" in file and file.endswith(".conllu")
709
- ]
710
- },
711
- ),
712
- ]
713
- elif lang == "Tagalog" or lang == "Thai" or lang == "Yoruba":
714
- return [
715
- datasets.SplitGenerator(
716
- name=datasets.Split.TEST,
717
- # These kwargs will be passed to _generate_examples
718
- gen_kwargs={
719
- "filepath": [
720
- os.path.join(folder, file)
721
- for folder in folders
722
- for file in sorted(os.listdir(folder))
723
- if "test" in file and file.endswith(".conllu")
724
- ]
725
- },
726
- )
727
- ]
728
- else:
729
- return [
730
- # We exclude Arabic-NYUAD which does not contains any words, only _
731
- datasets.SplitGenerator(
732
- name=datasets.Split.VALIDATION,
733
- # These kwargs will be passed to _generate_examples
734
- gen_kwargs={
735
- "filepath": [
736
- os.path.join(folder, file)
737
- for folder in folders
738
- for file in sorted(os.listdir(folder))
739
- if "NYUAD" not in folder and "dev" in file and file.endswith(".conllu")
740
- ]
741
- },
742
- ),
743
- datasets.SplitGenerator(
744
- name=datasets.Split.TEST,
745
- # These kwargs will be passed to _generate_examples
746
- gen_kwargs={
747
- "filepath": [
748
- os.path.join(folder, file)
749
- for folder in folders
750
- for file in sorted(os.listdir(folder))
751
- if "NYUAD" not in folder and "test" in file and file.endswith(".conllu")
752
- ]
753
- },
754
- ),
755
- datasets.SplitGenerator(
756
- name=datasets.Split.TRAIN,
757
- # These kwargs will be passed to _generate_examples
758
- gen_kwargs={
759
- "filepath": [
760
- os.path.join(folder, file)
761
- for folder in folders
762
- for file in sorted(os.listdir(folder))
763
- if "NYUAD" not in folder and "train" in file and file.endswith(".conllu")
764
- ]
765
- },
766
- ),
767
- ]
768
 
769
  if self.config.name == "SQuAD":
770
 
@@ -808,7 +692,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
808
  ),
809
  ]
810
 
811
- def _generate_examples(self, filepath):
812
  """Yields examples."""
813
  # TODO(xtreme): Yields (key, example) tuples from the dataset
814
 
@@ -935,22 +819,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
935
  "target_lang": "eng",
936
  }
937
  if self.config.name.startswith("udpos"):
938
- for id_file, file in enumerate(filepath):
939
- with open(file, encoding="utf-8") as f:
940
- data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
941
- tokens = []
942
- pos_tags = []
943
- for id_row, row in enumerate(data):
944
- if len(row) >= 10 and row[1] != "_" and row[3] != "_":
945
- tokens.append(row[1])
946
- pos_tags.append(row[3])
947
- if len(row) == 0 and len(tokens) > 0:
948
- yield str(id_file) + "_" + str(id_row), {
949
- "tokens": tokens,
950
- "pos_tags": pos_tags,
951
- }
952
- tokens = []
953
- pos_tags = []
954
  if self.config.name.startswith("PAN-X"):
955
  guid_index = 1
956
  with open(filepath, encoding="utf-8") as f:
@@ -986,3 +855,82 @@ class Xtreme(datasets.GeneratorBasedBuilder):
986
  "ner_tags": ner_tags,
987
  "langs": langs,
988
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
 
4
  import csv
 
5
  import json
6
  import os
7
  import textwrap
 
476
  features["gold_label"] = datasets.Value("string")
477
 
478
  if self.config.name.startswith("udpos"):
479
+ features = UdposParser.features
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
480
 
481
  if self.config.name.startswith("PAN-X"):
482
  features = datasets.Features(
 
648
  ),
649
  ]
650
  if self.config.name.startswith("udpos"):
651
+ return UdposParser.split_generators(dl_manager=dl_manager, config=self.config)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
652
 
653
  if self.config.name == "SQuAD":
654
 
 
692
  ),
693
  ]
694
 
695
+ def _generate_examples(self, filepath=None, **kwargs):
696
  """Yields examples."""
697
  # TODO(xtreme): Yields (key, example) tuples from the dataset
698
 
 
819
  "target_lang": "eng",
820
  }
821
  if self.config.name.startswith("udpos"):
822
+ yield from UdposParser.generate_examples(config=self.config, filepath=filepath, **kwargs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  if self.config.name.startswith("PAN-X"):
824
  guid_index = 1
825
  with open(filepath, encoding="utf-8") as f:
 
855
  "ner_tags": ner_tags,
856
  "langs": langs,
857
  }
858
+
859
+
860
class UdposParser:
    """Helpers for the ``udpos.<lang>`` configs of the XTREME dataset.

    Streams CoNLL-U files straight out of the downloaded Universal
    Dependencies archive (via ``dl_manager.iter_archive``) instead of
    extracting it, so the loader also works in streaming mode.
    """

    # Schema shared by every "udpos.<lang>" config: the tokenized sentence
    # plus one label per token drawn from the 17 universal POS categories.
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "pos_tags": datasets.Sequence(
                datasets.features.ClassLabel(
                    names=[
                        "ADJ",
                        "ADP",
                        "ADV",
                        "AUX",
                        "CCONJ",
                        "DET",
                        "INTJ",
                        "NOUN",
                        "NUM",
                        "PART",
                        "PRON",
                        "PROPN",
                        "PUNCT",
                        "SCONJ",
                        "SYM",
                        "VERB",
                        "X",
                    ]
                )
            ),
        }
    )

    @staticmethod
    def split_generators(dl_manager=None, config=None):
        """Return the split generators for a ``udpos.<lang>`` config.

        The archive is downloaded but deliberately NOT extracted; each split
        receives a fresh lazy iterator over the archive members plus the
        split name ("train"/"dev"/"test") used to filter file paths.
        Languages whose treebanks ship without train and/or dev data get
        correspondingly fewer splits.
        """
        archive = dl_manager.download(config.data_url)
        split_names = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev", datasets.Split.TEST: "test"}
        split_generators = {
            split: datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": dl_manager.iter_archive(archive),
                    "split": split_names[split],
                },
            )
            for split in split_names
        }
        lang = config.name.split(".")[1]
        if lang in ["Tagalog", "Thai", "Yoruba"]:
            # These treebanks only provide a test split.
            return [split_generators["test"]]
        elif lang == "Kazakh":
            # Kazakh has no dev split.
            return [split_generators["train"], split_generators["test"]]
        else:
            return [split_generators["train"], split_generators["validation"], split_generators["test"]]

    @staticmethod
    def generate_examples(config=None, filepath=None, split=None):
        """Yield ``(key, example)`` pairs for one split of a ``udpos.<lang>`` config.

        Args:
            config: the builder config; its name encodes the language
                after the first dot (``udpos.<lang>``).
            filepath: iterator of ``(path, file_obj)`` pairs over the raw
                (binary) members of the UD archive.
            split: substring identifying the split's files
                ("train", "dev" or "test").
        """
        lang = config.name.split(".")[1]
        idx = 0
        for path, file in filepath:
            if f"_{lang}" in path and split in path and path.endswith(".conllu"):
                # For languages other than Kazakh/Tagalog/Thai/Yoruba, we exclude
                # Arabic-NYUAD, which does not contain any words, only "_".
                if lang in ["Kazakh", "Tagalog", "Thai", "Yoruba"] or "NYUAD" not in path:
                    # Archive members are binary; decode lazily line by line.
                    lines = (line.decode("utf-8") for line in file)
                    data = csv.reader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
                    tokens = []
                    pos_tags = []
                    # NOTE: the previous `enumerate` index was unused and has
                    # been dropped; examples are keyed by the running `idx`.
                    for row in data:
                        # A CoNLL-U token line has 10 tab-separated columns;
                        # skip multiword/empty placeholders ("_").
                        if len(row) >= 10 and row[1] != "_" and row[3] != "_":
                            tokens.append(row[1])
                            pos_tags.append(row[3])
                        # A blank line terminates the current sentence.
                        if len(row) == 0 and len(tokens) > 0:
                            yield idx, {
                                "tokens": tokens,
                                "pos_tags": pos_tags,
                            }
                            idx += 1
                            tokens = []
                            pos_tags = []
                    # Robustness: flush the last sentence when the file does
                    # not end with a trailing blank line (no-op for
                    # well-formed CoNLL-U files, which always do).
                    if tokens:
                        yield idx, {
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                        }
                        idx += 1