Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced

Commit 1d700b1 by albertvillanova
Parent: bf9faae

Add X-CODAH-ru data files
README.md CHANGED
@@ -377,13 +377,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 674853
+    num_bytes: 674567
     num_examples: 1000
   - name: validation
-    num_bytes: 193825
+    num_bytes: 193713
     num_examples: 300
-  download_size: 7519903
-  dataset_size: 868678
+  download_size: 314200
+  dataset_size: 868280
 - config_name: X-CODAH-sw
   features:
   - name: id
@@ -987,6 +987,12 @@ configs:
     path: X-CODAH-pt/test-*
   - split: validation
     path: X-CODAH-pt/validation-*
+- config_name: X-CODAH-ru
+  data_files:
+  - split: test
+    path: X-CODAH-ru/test-*
+  - split: validation
+    path: X-CODAH-ru/validation-*
 - config_name: X-CODAH-zh
   data_files:
   - split: test
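
With this change the "configs" section of the README maps the new X-CODAH-ru config to its Parquet files, and the per-split byte counts add up to the new dataset_size (674567 + 193713 = 868280). A minimal loading sketch, assuming the dataset is reachable under the "xcsr" repo id on the Hugging Face Hub (the repo id is an assumption, not stated in this commit):

    from datasets import load_dataset

    # "xcsr" repo id is an assumption; adjust if the dataset lives under another namespace
    ds = load_dataset("xcsr", "X-CODAH-ru")

    print(ds)                   # expected splits: test (1000 rows), validation (300 rows)
    print(ds["validation"][0])  # one example: question stem, choices (label/text), answerKey
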
X-CODAH-ru/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98546f22f9b4cdf0a383f260f0b6f3516a50384aebeed26e146b09981f8d995b
+size 241411

X-CODAH-ru/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b72880c4acba47d8229a70024873ee9b4828cf4ed376743c966fc81e654bce1f
+size 72789
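
The two ADDED files are Git LFS pointers; the Parquet payloads themselves are 241411 and 72789 bytes, which together account for the download_size of 314200 recorded in the updated metadata. A sketch for fetching and inspecting one of them directly, again assuming the "xcsr" dataset repo id (requires pandas with pyarrow installed):

    import pandas as pd
    from huggingface_hub import hf_hub_download

    # repo_id is an assumption; repo_type="dataset" targets a dataset repo rather than a model
    path = hf_hub_download(
        repo_id="xcsr",
        filename="X-CODAH-ru/test-00000-of-00001.parquet",
        repo_type="dataset",
    )
    df = pd.read_parquet(path)   # needs pyarrow (or fastparquet)
    print(len(df))               # 1000 test examples per the split metadata above
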
dataset_infos.json CHANGED
@@ -1763,53 +1763,42 @@
     "features": {
       "id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "lang": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question_tag": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question": {
         "stem": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "choices": {
           "feature": {
             "label": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
             "text": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             }
           },
-          "length": -1,
-          "id": null,
           "_type": "Sequence"
         }
       },
       "answerKey": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "xcsr",
+    "dataset_name": "xcsr",
     "config_name": "X-CODAH-ru",
     "version": {
       "version_str": "1.1.0",
@@ -1821,27 +1810,20 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 674853,
+        "num_bytes": 674567,
         "num_examples": 1000,
-        "dataset_name": "xcsr"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 193825,
+        "num_bytes": 193713,
         "num_examples": 300,
-        "dataset_name": "xcsr"
-      }
-    },
-    "download_checksums": {
-      "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-        "num_bytes": 7519903,
-        "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+        "dataset_name": null
       }
     },
-    "download_size": 7519903,
-    "post_processing_size": null,
-    "dataset_size": 868678,
-    "size_in_bytes": 8388581
+    "download_size": 314200,
+    "dataset_size": 868280,
+    "size_in_bytes": 1182480
   },
   "X-CODAH-ar": {
     "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",