Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
Commit 9682793 by albertvillanova (1 parent: 89977ca)

Add X-CODAH-hi data files
README.md CHANGED
@@ -203,13 +203,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 974019
+    num_bytes: 973733
     num_examples: 1000
   - name: validation
-    num_bytes: 283116
+    num_bytes: 283004
     num_examples: 300
-  download_size: 7519903
-  dataset_size: 1257135
+  download_size: 336862
+  dataset_size: 1256737
 - config_name: X-CODAH-it
   features:
   - name: id
@@ -963,6 +963,12 @@ configs:
     path: X-CODAH-fr/test-*
   - split: validation
     path: X-CODAH-fr/validation-*
+- config_name: X-CODAH-hi
+  data_files:
+  - split: test
+    path: X-CODAH-hi/test-*
+  - split: validation
+    path: X-CODAH-hi/validation-*
 - config_name: X-CODAH-it
   data_files:
   - split: test
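With the new `configs` entry pointing at the X-CODAH-hi Parquet files, the config can be loaded directly with the `datasets` library. A minimal sketch, assuming the dataset lives under the `INK-USC/xcsr` repo id (an assumption; adjust if the namespace differs):

```python
from datasets import load_dataset

# Load the newly wired X-CODAH-hi config straight from the Parquet data files.
# "INK-USC/xcsr" is an assumed repo id; substitute the actual namespace if different.
ds = load_dataset("INK-USC/xcsr", "X-CODAH-hi")

print(ds)             # expected splits: test (1000 examples), validation (300 examples)
print(ds["test"][0])  # fields: id, lang, question_tag, question (stem + choices), answerKey
```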
X-CODAH-hi/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8729d16bd51c7e0be6ccd7afc517f573f642deca1fcc38980e9179133e1cd6ca
+size 258259
X-CODAH-hi/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27688caaaf54502086babc82c36ff2eab12637cf601d37c286684340812a02f4
+size 78603
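The two files above are Git LFS pointers; the actual Parquet payloads (258259 and 78603 bytes) live in LFS storage. If needed, a split can also be read directly as a DataFrame via the `hf://` filesystem registered by `huggingface_hub` (repo id again an assumption):

```python
import pandas as pd

# Read the Hindi test split Parquet file directly; requires huggingface_hub
# (which provides the hf:// fsspec protocol) and pyarrow.
# The "INK-USC/xcsr" repo id is an assumption.
df = pd.read_parquet(
    "hf://datasets/INK-USC/xcsr/X-CODAH-hi/test-00000-of-00001.parquet"
)
print(len(df))  # should be 1000 rows, matching num_examples in the metadata
```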
dataset_infos.json CHANGED
@@ -1973,53 +1973,42 @@
     "features": {
       "id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "lang": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question_tag": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question": {
         "stem": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "choices": {
           "feature": {
             "label": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
             "text": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             }
           },
-          "length": -1,
-          "id": null,
           "_type": "Sequence"
         }
       },
       "answerKey": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "xcsr",
+    "dataset_name": "xcsr",
     "config_name": "X-CODAH-hi",
     "version": {
       "version_str": "1.1.0",
@@ -2031,27 +2020,20 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 974019,
+        "num_bytes": 973733,
         "num_examples": 1000,
-        "dataset_name": "xcsr"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 283116,
+        "num_bytes": 283004,
         "num_examples": 300,
-        "dataset_name": "xcsr"
-      }
-    },
-    "download_checksums": {
-      "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-        "num_bytes": 7519903,
-        "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+        "dataset_name": null
       }
     },
-    "download_size": 7519903,
-    "post_processing_size": null,
-    "dataset_size": 1257135,
-    "size_in_bytes": 8777038
+    "download_size": 336862,
+    "dataset_size": 1256737,
+    "size_in_bytes": 1593599
   },
   "X-CODAH-sw": {
     "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",