Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
albertvillanova (HF staff) committed
Commit ede575b
Parent: ed8577d

Add X-CODAH-jap data files
README.md CHANGED
@@ -261,13 +261,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 538701
+    num_bytes: 538415
     num_examples: 1000
   - name: validation
-    num_bytes: 157504
+    num_bytes: 157392
     num_examples: 300
-  download_size: 7519903
-  dataset_size: 696205
+  download_size: 264995
+  dataset_size: 695807
 - config_name: X-CODAH-nl
   features:
   - name: id
@@ -963,6 +963,12 @@ configs:
     path: X-CODAH-it/test-*
   - split: validation
     path: X-CODAH-it/validation-*
+- config_name: X-CODAH-jap
+  data_files:
+  - split: test
+    path: X-CODAH-jap/test-*
+  - split: validation
+    path: X-CODAH-jap/validation-*
 - config_name: X-CODAH-zh
   data_files:
   - split: test
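The configs entry added above maps the new X-CODAH-jap config to its Parquet shards, so the config can be loaded directly from the data files. A minimal sketch, assuming the dataset lives at the Hub repo id INK-USC/xcsr (the repo id is not stated in this commit; substitute the actual one):

# Sketch only: load the newly added Japanese config via the data_files mapping above.
# "INK-USC/xcsr" is an assumed repo id; the config name "X-CODAH-jap" comes from the diff.
from datasets import load_dataset

ds = load_dataset("INK-USC/xcsr", "X-CODAH-jap")
print(ds)                   # DatasetDict with "test" (1000 rows) and "validation" (300 rows)
print(ds["validation"][0])  # one example: id, lang, question_tag, question, answerKey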
X-CODAH-jap/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c466e47f9e3191c9b07639cb0bfa6578ab837f8d189780b5bb50fb2dd470bee5
+size 202475
X-CODAH-jap/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8fa2192323043959a0e3986ddc253aaf6e4beebbb20aecf5b2757117667e412
+size 62520
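The two files added above are Git LFS pointer stubs; the actual Parquet payloads (202475 and 62520 bytes) are stored in LFS. One way to peek at a single shard without the datasets builder, again assuming the repo id INK-USC/xcsr (an assumption, not part of this commit):

# Sketch: download one of the new Parquet shards and read it with pandas.
# hf_hub_download resolves the LFS pointer to the real file; the repo id is assumed.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="INK-USC/xcsr",
    filename="X-CODAH-jap/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(len(df))               # expected 300 rows, matching the README metadata
print(df.columns.tolist())   # id, lang, question_tag, question, answerKey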
dataset_infos.json CHANGED
@@ -1483,53 +1483,42 @@
     "features": {
       "id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "lang": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question_tag": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question": {
         "stem": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "choices": {
           "feature": {
             "label": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
             "text": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             }
           },
-          "length": -1,
-          "id": null,
           "_type": "Sequence"
         }
       },
       "answerKey": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "xcsr",
+    "dataset_name": "xcsr",
     "config_name": "X-CODAH-jap",
     "version": {
       "version_str": "1.1.0",
@@ -1541,27 +1530,20 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 538701,
+        "num_bytes": 538415,
         "num_examples": 1000,
-        "dataset_name": "xcsr"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 157504,
+        "num_bytes": 157392,
         "num_examples": 300,
-        "dataset_name": "xcsr"
-      }
-    },
-    "download_checksums": {
-      "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-        "num_bytes": 7519903,
-        "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+        "dataset_name": null
       }
     },
-    "download_size": 7519903,
-    "post_processing_size": null,
-    "dataset_size": 696205,
-    "size_in_bytes": 8216108
+    "download_size": 264995,
+    "dataset_size": 695807,
+    "size_in_bytes": 960802
   },
   "X-CODAH-nl": {
     "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",