Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
albertvillanova committed
Commit 9150acc
1 Parent(s): d25ae7c

Add X-CSQA-de data files
README.md CHANGED
@@ -547,13 +547,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 234472
+    num_bytes: 234170
     num_examples: 1074
   - name: validation
-    num_bytes: 223122
+    num_bytes: 222840
     num_examples: 1000
-  download_size: 7519903
-  dataset_size: 457594
+  download_size: 242762
+  dataset_size: 457010
 - config_name: X-CSQA-en
   features:
   - name: id
@@ -933,6 +933,12 @@ dataset_info:
   download_size: 207379
   dataset_size: 385717
 configs:
+- config_name: X-CSQA-de
+  data_files:
+  - split: test
+    path: X-CSQA-de/test-*
+  - split: validation
+    path: X-CSQA-de/validation-*
 - config_name: X-CSQA-en
   data_files:
   - split: test
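
The configs entry added above maps the X-CSQA-de config to the committed Parquet shards, so the config can be resolved straight from the data files on the Hub. A minimal loading sketch (the repository id "INK-USC/xcsr" is an assumption, not confirmed by this commit; substitute the actual repo path):

# Load the X-CSQA-de config from the Parquet data files declared above.
# NOTE: the repository id is assumed for illustration.
from datasets import load_dataset

xcsqa_de = load_dataset("INK-USC/xcsr", "X-CSQA-de")
print(xcsqa_de["test"].num_rows)        # 1074 examples, per the split metadata
print(xcsqa_de["validation"].num_rows)  # 1000 examples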
X-CSQA-de/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f908018b4a881753b8b43e2a087df7a5429742fadcbe9e37c9db62743cebc2f
+size 125856
X-CSQA-de/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81bc3acecabb6ad76dd74c570c84c442224ee73d9436b185ffb6a2a467dea948
+size 116906
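
Both Parquet shards are stored through Git LFS, so the diff records only pointer files: the LFS spec version, the sha256 of the actual file content (oid), and its size in bytes. A short sketch of checking a downloaded shard against its pointer (the local path is an assumption; point it at wherever the shard was fetched):

# Verify a downloaded shard against the LFS pointer committed above.
import hashlib
from pathlib import Path

path = Path("X-CSQA-de/test-00000-of-00001.parquet")  # assumed local location
expected_oid = "7f908018b4a881753b8b43e2a087df7a5429742fadcbe9e37c9db62743cebc2f"
expected_size = 125856

data = path.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("shard matches its LFS pointer")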
dataset_infos.json CHANGED
@@ -139,48 +139,38 @@
     "features": {
       "id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "lang": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question": {
         "stem": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "choices": {
           "feature": {
             "label": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
             "text": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             }
           },
-          "length": -1,
-          "id": null,
           "_type": "Sequence"
         }
       },
       "answerKey": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "xcsr",
+    "dataset_name": "xcsr",
     "config_name": "X-CSQA-de",
     "version": {
       "version_str": "1.1.0",
@@ -192,27 +182,20 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 234472,
+        "num_bytes": 234170,
         "num_examples": 1074,
-        "dataset_name": "xcsr"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 223122,
+        "num_bytes": 222840,
         "num_examples": 1000,
-        "dataset_name": "xcsr"
-      }
-    },
-    "download_checksums": {
-      "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-        "num_bytes": 7519903,
-        "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+        "dataset_name": null
       }
     },
-    "download_size": 7519903,
-    "post_processing_size": null,
-    "dataset_size": 457594,
-    "size_in_bytes": 7977497
+    "download_size": 242762,
+    "dataset_size": 457010,
+    "size_in_bytes": 699772
   },
   "X-CSQA-es": {
     "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",