system (HF staff) committed
Commit 2851143
Parent: 9a3f635

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (3)
  1. README.md +1 -0
  2. dataset_infos.json +0 -0
  3. indic_glue.py +205 -147
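
In indic_glue.py, the old download_and_extract/os.path.join file access is replaced with dl_manager.download plus dl_manager.iter_archive: each split generator now passes a relative path inside the archive ("datafile") together with an iterator over the archive's members ("files"), and _generate_examples reads the matching member directly instead of opening an extracted file. This is the pattern datasets 1.16.0 uses so that loading scripts can also be streamed. As a rough, self-contained illustration of that access pattern only (not part of this commit; the archive name, member path, and CSV content below are made up), the standard tarfile module can mimic what iter_archive yields:

import io
import os
import tarfile
import tempfile

# Build a throw-away archive that stands in for the real task tarball.
# The member path "wnli/en/train.csv" and its content are hypothetical.
workdir = tempfile.mkdtemp()
archive_path = os.path.join(workdir, "wnli.tar.gz")
payload = b"sentence1,sentence2,label\nA man is eating.,Someone is eating.,1\n"
with tarfile.open(archive_path, "w:gz") as tar:
    info = tarfile.TarInfo(name="wnli/en/train.csv")
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))


def iter_archive(path):
    """Yield (member_name, file_object) pairs, like dl_manager.iter_archive()."""
    with tarfile.open(path) as tar:
        for member in tar.getmembers():
            if member.isfile():
                yield member.name, tar.extractfile(member)


# _generate_examples-style lookup: scan members, read the one we want, then stop.
wanted = "wnli/en/train.csv"  # plays the role of the "datafile" gen_kwarg
for name, fileobj in iter_archive(archive_path):
    if name == wanted:
        for line in fileobj:
            print(line.decode("utf-8").rstrip())
        break

The break after the matching member in the updated script follows the same idea: stop scanning the archive once the requested file has been read.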
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: IndicGLUE
 paperswithcode_id: null
 ---
 
dataset_infos.json CHANGED
The diff for this file is too large to render. See raw diff
 
indic_glue.py CHANGED
@@ -3,7 +3,6 @@
 
 import csv
 import json
-import os
 import textwrap
 
 import pandas as pd
@@ -515,137 +514,150 @@ class IndicGlue(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
 
         if self.config.name.startswith("wnli"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "train.csv"),
+                        "datafile": dl_dir + "/" + "train.csv",
                         "split": datasets.Split.TRAIN,
                         "key": "train-split",
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "dev.csv"),
+                        "datafile": dl_dir + "/" + "dev.csv",
                         "split": datasets.Split.VALIDATION,
                         "key": "val-split",
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "test.csv"),
+                        "datafile": dl_dir + "/" + "test.csv",
                         "split": datasets.Split.TEST,
                         "key": "test-split",
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
 
         if self.config.name.startswith("copa"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "train.jsonl"),
+                        "datafile": dl_dir + "/" + "train.jsonl",
                         "split": datasets.Split.TRAIN,
                         "key": "train-split",
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "val.jsonl"),
+                        "datafile": dl_dir + "/" + "val.jsonl",
                         "split": datasets.Split.VALIDATION,
                         "key": "val-split",
+                        "files": dl_manager.iter_archive(archive),
                    },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "test.jsonl"),
+                        "datafile": dl_dir + "/" + "test.jsonl",
                         "split": datasets.Split.TEST,
                         "key": "test-split",
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
 
         if self.config.name.startswith("sna"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "bn-train.csv"),
+                        "datafile": dl_dir + "/" + "bn-train.csv",
                         "split": datasets.Split.TRAIN,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "bn-valid.csv"),
+                        "datafile": dl_dir + "/" + "bn-valid.csv",
                         "split": datasets.Split.VALIDATION,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "bn-test.csv"),
+                        "datafile": dl_dir + "/" + "bn-test.csv",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
 
         if self.config.name.startswith("csqa"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name)
+            dl_dir = task_name
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}.json"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}.json",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 )
             ]
 
         if self.config.name.startswith("wstp"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.json"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.json",
                         "split": datasets.Split.TRAIN,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-valid.json"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.json",
                         "split": datasets.Split.VALIDATION,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.json"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.json",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
@@ -655,127 +667,139 @@ class IndicGlue(datasets.GeneratorBasedBuilder):
             or self.config.name.startswith("iitp")
             or self.config.name.startswith("actsa")
         ):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.csv"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.csv",
                         "split": datasets.Split.TRAIN,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-valid.csv"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.csv",
                         "split": datasets.Split.VALIDATION,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.csv"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.csv",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
 
         if self.config.name.startswith("bbca"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.csv"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.csv",
                         "split": datasets.Split.TRAIN,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.csv"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.csv",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
 
         if self.config.name.startswith("cvit"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
                         "datafile": None,
-                        "src": os.path.join(dl_dir, f"mkb.{self.config.name.split('.')[1].split('-')[0]}"),
-                        "tgt": os.path.join(dl_dir, f"mkb.{self.config.name.split('.')[1].split('-')[1]}"),
+                        "src": dl_dir + "/" + f"mkb.{self.config.name.split('.')[1].split('-')[0]}",
+                        "tgt": dl_dir + "/" + f"mkb.{self.config.name.split('.')[1].split('-')[1]}",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 )
             ]
 
         if self.config.name.startswith("md"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "train.json"),
+                        "datafile": dl_dir + "/" + "train.json",
                         "split": datasets.Split.TRAIN,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "val.json"),
+                        "datafile": dl_dir + "/" + "val.json",
                         "split": datasets.Split.VALIDATION,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, "test.json"),
+                        "datafile": dl_dir + "/" + "test.json",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
 
         if self.config.name.startswith("wiki-ner"):
-            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            archive = dl_manager.download(self.config.data_url)
             task_name = self._get_task_name_from_data_url(self.config.data_url)
-            dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
+            dl_dir = task_name + "/" + self.config.name.split(".")[1]
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.txt"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.txt",
                         "split": datasets.Split.TRAIN,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-valid.txt"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.txt",
                         "split": datasets.Split.VALIDATION,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.txt"),
+                        "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.txt",
                         "split": datasets.Split.TEST,
+                        "files": dl_manager.iter_archive(archive),
                     },
                 ),
             ]
@@ -783,139 +807,173 @@ class IndicGlue(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, **args):
         """Yields examples."""
         filepath = args["datafile"]
+        files = args["files"]
 
         if self.config.name.startswith("wnli"):
             if args["key"] == "test-split":
-                with open(filepath, encoding="utf-8") as f:
-                    data = csv.DictReader(f)
-                    for id_, row in enumerate(data):
-                        yield id_, {"hypothesis": row["sentence1"], "premise": row["sentence2"], "label": "None"}
+                for path, f in files:
+                    if path == filepath:
+                        data = csv.DictReader((line.decode("utf-8") for line in f))
+                        for id_, row in enumerate(data):
+                            yield id_, {"hypothesis": row["sentence1"], "premise": row["sentence2"], "label": "None"}
+                        break
             else:
-                with open(filepath, encoding="utf-8") as f:
-                    data = csv.DictReader(f)
-                    for id_, row in enumerate(data):
-                        label = "entailment" if row["label"] else "not_entailment"
-                        yield id_, {
-                            "hypothesis": row["sentence1"],
-                            "premise": row["sentence2"],
-                            "label": label,
-                        }
+                for path, f in files:
+                    if path == filepath:
+                        data = csv.DictReader((line.decode("utf-8") for line in f))
+                        for id_, row in enumerate(data):
+                            label = "entailment" if row["label"] else "not_entailment"
+                            yield id_, {
+                                "hypothesis": row["sentence1"],
+                                "premise": row["sentence2"],
+                                "label": label,
+                            }
+                        break
 
         if self.config.name.startswith("copa"):
             if args["key"] == "test-split":
-                with open(filepath, "r", encoding="utf-8") as f:
-                    lines = f.readlines()
-                    data = map(lambda l: json.loads(l), lines)
-                    data = list(data)
-                    for id_, row in enumerate(data):
-                        yield id_, {
-                            "premise": row["premise"],
-                            "choice1": row["choice1"],
-                            "choice2": row["choice2"],
-                            "question": row["question"],
-                            "label": 0,
-                        }
+                for path, f in files:
+                    if path == filepath:
+                        lines = f.readlines()
+                        data = map(lambda l: json.loads(l), lines)
+                        data = list(data)
+                        for id_, row in enumerate(data):
+                            yield id_, {
+                                "premise": row["premise"],
+                                "choice1": row["choice1"],
+                                "choice2": row["choice2"],
+                                "question": row["question"],
+                                "label": 0,
+                            }
+                        break
            else:
-                with open(filepath, "r", encoding="utf-8") as f:
-                    lines = f.readlines()
-                    data = map(lambda l: json.loads(l), lines)
-                    data = list(data)
-                    for id_, row in enumerate(data):
-                        yield id_, {
-                            "premise": row["premise"],
-                            "choice1": row["choice1"],
-                            "choice2": row["choice2"],
-                            "question": row["question"],
-                            "label": row["label"],
-                        }
+                for path, f in files:
+                    if path == filepath:
+                        lines = f.readlines()
+                        data = map(lambda l: json.loads(l), lines)
+                        data = list(data)
+                        for id_, row in enumerate(data):
+                            yield id_, {
+                                "premise": row["premise"],
+                                "choice1": row["choice1"],
+                                "choice2": row["choice2"],
+                                "question": row["question"],
+                                "label": row["label"],
+                            }
+                        break
 
         if self.config.name.startswith("sna"):
-            df = pd.read_csv(filepath, names=["label", "text"])
-            for id_, row in df.iterrows():
-                yield id_, {"text": row["text"], "label": row["label"]}
+            for path, f in files:
+                if path == filepath:
+                    df = pd.read_csv(f, names=["label", "text"])
+                    for id_, row in df.iterrows():
+                        yield id_, {"text": row["text"], "label": row["label"]}
+                    break
 
         if self.config.name.startswith("csqa"):
-            with open(filepath, encoding="utf-8") as f:
-                data = json.load(f)
-                df = pd.DataFrame(data["cloze_data"])
-                df["out_of_context_options"].loc[df["out_of_context_options"].isnull()] = (
-                    df["out_of_context_options"].loc[df["out_of_context_options"].isnull()].apply(lambda x: [])
-                )
-                for id_, row in df.iterrows():
-                    yield id_, {
-                        "question": row["question"],
-                        "answer": row["answer"],
-                        "category": row["category"],
-                        "title": row["title"],
-                        "out_of_context_options": row["out_of_context_options"],
-                        "options": row["options"],
-                    }
+            for path, f in files:
+                if path == filepath:
+                    data = json.load(f)
+                    df = pd.DataFrame(data["cloze_data"])
+                    df["out_of_context_options"].loc[df["out_of_context_options"].isnull()] = (
+                        df["out_of_context_options"].loc[df["out_of_context_options"].isnull()].apply(lambda x: [])
+                    )
+                    for id_, row in df.iterrows():
+                        yield id_, {
+                            "question": row["question"],
+                            "answer": row["answer"],
+                            "category": row["category"],
+                            "title": row["title"],
+                            "out_of_context_options": row["out_of_context_options"],
+                            "options": row["options"],
+                        }
+                    break
 
         if self.config.name.startswith("wstp"):
-            df = pd.read_json(filepath)
-            for id_, row in df.iterrows():
-                yield id_, {
-                    "sectionText": row["sectionText"],
-                    "correctTitle": row["correctTitle"],
-                    "titleA": row["titleA"],
-                    "titleB": row["titleB"],
-                    "titleC": row["titleC"],
-                    "titleD": row["titleD"],
-                    "url": row["url"],
-                }
+            for path, f in files:
+                if path == filepath:
+                    df = pd.read_json(f)
+                    for id_, row in df.iterrows():
+                        yield id_, {
+                            "sectionText": row["sectionText"],
+                            "correctTitle": row["correctTitle"],
+                            "titleA": row["titleA"],
+                            "titleB": row["titleB"],
+                            "titleC": row["titleC"],
+                            "titleD": row["titleD"],
+                            "url": row["url"],
+                        }
+                    break
 
         if (
             self.config.name.startswith("inltkh")
             or self.config.name.startswith("bbca")
             or self.config.name.startswith("iitp")
         ):
-            df = pd.read_csv(filepath, names=["label", "text"])
-            for id_, row in df.iterrows():
-                yield id_, {"text": row["text"], "label": row["label"]}
+            for path, f in files:
+                if path == filepath:
+                    df = pd.read_csv(f, names=["label", "text"])
+                    for id_, row in df.iterrows():
+                        yield id_, {"text": row["text"], "label": row["label"]}
+                    break
 
         if self.config.name.startswith("actsa"):
-            df = pd.read_csv(filepath, names=["label", "text"])
-            for id_, row in df.iterrows():
-                label = "positive" if row["label"] else "negative"
-                yield id_, {"text": row["text"], "label": label}
+            for path, f in files:
+                if path == filepath:
+                    df = pd.read_csv(f, names=["label", "text"])
+                    for id_, row in df.iterrows():
+                        label = "positive" if row["label"] else "negative"
+                        yield id_, {"text": row["text"], "label": label}
+                    break
 
         if self.config.name.startswith("cvit"):
             source = args["src"]
             target = args["tgt"]
-
-            src, tgt = open(source, "r", encoding="utf-8"), open(target, "r", encoding="utf-8")
-            src, tgt = src.readlines(), tgt.readlines()
-
-            for id_, row in enumerate(zip(src, tgt)):
-                yield id_, {"sentence1": row[0], "sentence2": row[1]}
+            src, tgt = None, None
+            for path, f in files:
+                if path == source:
+                    src = f.read().decode("utf-8").splitlines()
+                elif path == target:
+                    tgt = f.read().decode("utf-8").splitlines()
+                if src is not None and tgt is not None:
+                    for id_, row in enumerate(zip(src, tgt)):
+                        yield id_, {"sentence1": row[0], "sentence2": row[1]}
+                    break
 
         if self.config.name.startswith("md"):
-            df = pd.read_json(filepath)
-            for id_, row in df.iterrows():
-                yield id_, {
-                    "story_number": row["Story_no"],
-                    "sentence": row["Sentence"],
-                    "discourse_mode": row["Discourse Mode"],
-                    "id": row["id"],
-                }
+            for path, f in files:
+                if path == filepath:
+                    df = pd.read_json(f)
+                    for id_, row in df.iterrows():
+                        yield id_, {
+                            "story_number": row["Story_no"],
+                            "sentence": row["Sentence"],
+                            "discourse_mode": row["Discourse Mode"],
+                            "id": row["id"],
+                        }
+                    break
 
         if self.config.name.startswith("wiki-ner"):
-            with open(filepath, "r", encoding="utf-8") as f:
-                data = f.readlines()
-                for id_, row in enumerate(data):
+            for path, f in files:
+                if path == filepath:
+                    data = f.read().decode("utf-8").splitlines()
                     tokens = []
                     labels = []
                     infos = []
-
-                    row = row.split()
-
-                    if len(row) == 0:
-                        yield id_, {"tokens": tokens, "ner_tags": labels, "additional_info": infos}
-                        continue
-
-                    tokens.append(row[0])
-                    labels.append(row[-1])
-                    infos.append(row[1:-1])
+                    for id_, row in enumerate(data):
+                        row = row.split()
+
+                        if len(row) == 0:
+                            yield id_, {"tokens": tokens, "ner_tags": labels, "additional_info": infos}
+                            tokens = []
+                            labels = []
+                            infos = []
+                            continue
+
+                        tokens.append(row[0])
+                        labels.append(row[-1])
+                        infos.append(row[1:-1])
+                    break
 
     def _get_task_name_from_data_url(self, data_url):
         return data_url.split("/")[-1].split(".")[0]
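
For a quick sanity check of the updated script, the dataset can be loaded as usual; with the archive-iteration pattern above it should also work in streaming mode. A minimal sketch, assuming "wnli.en" is the config you want (any other IndicGLUE config name follows the same task.language pattern):

# Minimal usage sketch; "wnli.en" is one IndicGLUE config name, pick whichever
# config you need. Requires the datasets library.
from datasets import load_dataset

ds = load_dataset("indic_glue", "wnli.en")
print(ds["train"][0])

# With dl_manager.iter_archive, the same script can be streamed without extraction:
streamed = load_dataset("indic_glue", "wnli.en", streaming=True)
print(next(iter(streamed["train"])))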