Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
albertvillanova committed f22aacf (parent: 5a483a2)

Add X-CSQA-ar data files
README.md CHANGED
@@ -520,13 +520,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 288947
+    num_bytes: 288645
     num_examples: 1074
   - name: validation
-    num_bytes: 273862
+    num_bytes: 273580
     num_examples: 1000
-  download_size: 7519903
-  dataset_size: 562809
+  download_size: 255626
+  dataset_size: 562225
 - config_name: X-CSQA-de
   features:
   - name: id
@@ -933,6 +933,12 @@ dataset_info:
   download_size: 207379
   dataset_size: 385717
 configs:
+- config_name: X-CSQA-ar
+  data_files:
+  - split: test
+    path: X-CSQA-ar/test-*
+  - split: validation
+    path: X-CSQA-ar/validation-*
 - config_name: X-CSQA-de
   data_files:
   - split: test
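With the new configs entry above, the added split should be loadable directly by config name. A minimal sketch, assuming the dataset is hosted on the Hub as INK-USC/xcsr (the repo id is not stated in this diff):

# Load the newly added X-CSQA-ar config and sanity-check the split sizes
# declared in the README metadata (1074 test / 1000 validation examples).
from datasets import load_dataset

ds = load_dataset("INK-USC/xcsr", "X-CSQA-ar")  # repo id assumed, not taken from this diff
assert ds["test"].num_rows == 1074
assert ds["validation"].num_rows == 1000
print(ds["validation"][0]["question"]["stem"])  # question stem of the first validation example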
X-CSQA-ar/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05cd96edd9aef0f329a5e8dfa00d62cc174c00246fb6c071f8b6cc51dfa1bbed
+size 132616
X-CSQA-ar/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99ac89ded5832916fed7aa9f72426671bcc633e8d6753941430a2fe44c3ecaf6
+size 123010
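The two files above are Git LFS pointers; the actual parquet payloads (132,616 and 123,010 bytes) live in LFS storage. Once the LFS objects are fetched, the files can be inspected directly, for example with pyarrow. A sketch, with the expected schema taken from the features block in dataset_infos.json below:

# Read the added parquet file directly (requires the real LFS object, not the pointer).
import pyarrow.parquet as pq

table = pq.read_table("X-CSQA-ar/test-00000-of-00001.parquet")
print(table.num_rows)  # expected 1074, per the split metadata
print(table.schema)    # id, lang, question (stem + choices), answerKey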
dataset_infos.json CHANGED
@@ -733,48 +733,38 @@
       "features": {
         "id": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "lang": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "question": {
           "stem": {
             "dtype": "string",
-            "id": null,
             "_type": "Value"
           },
           "choices": {
             "feature": {
               "label": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
               },
               "text": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
               }
             },
-            "length": -1,
-            "id": null,
             "_type": "Sequence"
           }
         },
         "answerKey": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         }
       },
-      "post_processed": null,
-      "supervised_keys": null,
-      "task_templates": null,
       "builder_name": "xcsr",
+      "dataset_name": "xcsr",
       "config_name": "X-CSQA-ar",
       "version": {
         "version_str": "1.1.0",
@@ -786,27 +776,20 @@
       "splits": {
         "test": {
           "name": "test",
-          "num_bytes": 288947,
+          "num_bytes": 288645,
           "num_examples": 1074,
-          "dataset_name": "xcsr"
+          "dataset_name": null
         },
         "validation": {
           "name": "validation",
-          "num_bytes": 273862,
+          "num_bytes": 273580,
           "num_examples": 1000,
-          "dataset_name": "xcsr"
-        }
-      },
-      "download_checksums": {
-        "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-          "num_bytes": 7519903,
-          "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+          "dataset_name": null
         }
       },
-      "download_size": 7519903,
-      "post_processing_size": null,
-      "dataset_size": 562809,
-      "size_in_bytes": 8082712
+      "download_size": 255626,
+      "dataset_size": 562225,
+      "size_in_bytes": 817851
     },
     "X-CSQA-vi": {
       "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",