Dataset tags:
Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
Commit ed8577d (parent: 260f0d7), committed by albertvillanova (HF staff)

Add X-CODAH-it data files
README.md CHANGED
@@ -232,13 +232,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 457341
+    num_bytes: 457055
     num_examples: 1000
   - name: validation
-    num_bytes: 133616
+    num_bytes: 133504
     num_examples: 300
-  download_size: 7519903
-  dataset_size: 590957
+  download_size: 241780
+  dataset_size: 590559
 - config_name: X-CODAH-jap
   features:
   - name: id
@@ -957,6 +957,12 @@ configs:
     path: X-CODAH-fr/test-*
   - split: validation
     path: X-CODAH-fr/validation-*
+- config_name: X-CODAH-it
+  data_files:
+  - split: test
+    path: X-CODAH-it/test-*
+  - split: validation
+    path: X-CODAH-it/validation-*
 - config_name: X-CODAH-zh
   data_files:
   - split: test
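
With the new configs entry in place, the Italian CODAH data can be loaded straight from the added Parquet files. A minimal sketch using the datasets library; the repository id INK-USC/xcsr is an assumption here and may differ from the actual hub id of this dataset:

from datasets import load_dataset

# NOTE: "INK-USC/xcsr" is an assumed repository id; substitute the actual hub id.
REPO_ID = "INK-USC/xcsr"

# Load the newly added Italian CODAH configuration declared in the README.
xcodah_it = load_dataset(REPO_ID, "X-CODAH-it")

# Splits follow the metadata above: 1000 test examples, 300 validation examples.
print(xcodah_it["test"].num_rows, xcodah_it["validation"].num_rows)

# Each example carries an id, lang, question_tag, a question with a stem and
# answer choices, and an answerKey (see the features in dataset_infos.json below).
print(xcodah_it["validation"][0])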
X-CODAH-it/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9b9fe1b58379594014aa355f706ca7151ddf3f81d012aab9f12859927560164
+size 184714
X-CODAH-it/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c33b4181e7149f03dd45c2b5a3fea2c39274b5e1600e8bad6e12ac161c3b198d
+size 57066
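
The two ADDED files are Git LFS pointers rather than the Parquet payloads themselves; each pointer records the sha256 oid and byte size of the object it stands for (184714 + 57066 = 241780 bytes, which is exactly the download_size recorded for this config). A small sketch that verifies locally materialized files against these pointers, assuming the repository has been cloned and git-lfs has pulled the objects:

import hashlib

# Expected (oid, size) pairs copied from the LFS pointer files above.
EXPECTED = {
    "X-CODAH-it/test-00000-of-00001.parquet": (
        "b9b9fe1b58379594014aa355f706ca7151ddf3f81d012aab9f12859927560164", 184714),
    "X-CODAH-it/validation-00000-of-00001.parquet": (
        "c33b4181e7149f03dd45c2b5a3fea2c39274b5e1600e8bad6e12ac161c3b198d", 57066),
}

def verify(path, oid, size):
    """Check a locally materialized LFS object against its pointer."""
    with open(path, "rb") as f:
        data = f.read()
    assert len(data) == size, f"{path}: size mismatch"
    assert hashlib.sha256(data).hexdigest() == oid, f"{path}: sha256 mismatch"

for path, (oid, size) in EXPECTED.items():
    verify(path, oid, size)  # assumes the files exist at these relative paths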
dataset_infos.json CHANGED
@@ -1413,53 +1413,42 @@
     "features": {
       "id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "lang": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question_tag": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question": {
         "stem": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "choices": {
           "feature": {
             "label": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
             "text": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             }
           },
-          "length": -1,
-          "id": null,
           "_type": "Sequence"
         }
       },
       "answerKey": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "xcsr",
+    "dataset_name": "xcsr",
     "config_name": "X-CODAH-it",
     "version": {
       "version_str": "1.1.0",
@@ -1471,27 +1460,20 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 457341,
+        "num_bytes": 457055,
         "num_examples": 1000,
-        "dataset_name": "xcsr"
+        "dataset_name": null
      },
       "validation": {
         "name": "validation",
-        "num_bytes": 133616,
+        "num_bytes": 133504,
         "num_examples": 300,
-        "dataset_name": "xcsr"
-      }
-    },
-    "download_checksums": {
-      "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-        "num_bytes": 7519903,
-        "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+        "dataset_name": null
       }
     },
-    "download_size": 7519903,
-    "post_processing_size": null,
-    "dataset_size": 590957,
-    "size_in_bytes": 8110860
+    "download_size": 241780,
+    "dataset_size": 590559,
+    "size_in_bytes": 832339
   },
   "X-CODAH-jap": {
     "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",