Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
albertvillanova (HF staff) committed
Commit dcc5275 · 1 parent: ede575b

Add X-CODAH-nl data files

README.md CHANGED
@@ -290,13 +290,13 @@ dataset_info:
     dtype: string
   splits:
   - name: test
-    num_bytes: 449014
+    num_bytes: 448728
     num_examples: 1000
   - name: validation
-    num_bytes: 130130
+    num_bytes: 130018
     num_examples: 300
-  download_size: 7519903
-  dataset_size: 579144
+  download_size: 237855
+  dataset_size: 578746
 - config_name: X-CODAH-pl
   features:
   - name: id
@@ -969,6 +969,12 @@ configs:
     path: X-CODAH-jap/test-*
   - split: validation
     path: X-CODAH-jap/validation-*
+- config_name: X-CODAH-nl
+  data_files:
+  - split: test
+    path: X-CODAH-nl/test-*
+  - split: validation
+    path: X-CODAH-nl/validation-*
 - config_name: X-CODAH-zh
   data_files:
   - split: test
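With the new `configs` entry in place, the X-CODAH-nl config can be loaded straight from the Parquet files. A minimal sketch, assuming the dataset is hosted under the `INK-USC/xcsr` repository id (the repo path is not shown in this diff):

```python
# Minimal sketch: load the newly added X-CODAH-nl config.
# The repo id "INK-USC/xcsr" is an assumption; substitute the actual path.
from datasets import load_dataset

xcodah_nl = load_dataset("INK-USC/xcsr", "X-CODAH-nl")

# Split sizes declared in the README metadata above.
print(xcodah_nl["test"].num_rows)        # expected: 1000
print(xcodah_nl["validation"].num_rows)  # expected: 300
```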
X-CODAH-nl/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8619cac30d76b6959904cccf7c137237f879ab49abacb0c66dabfbf5040b8b8
+size 182044
X-CODAH-nl/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29a5a926300cfbc731fa88c0789488dd2bf16e18a7d3ed563227cd6b2957d2a7
+size 55811
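The two added files are Git LFS pointers rather than the Parquet data itself; each pointer records the SHA-256 digest and byte size of the real file. A sketch of how a downloaded test-split shard could be checked against its pointer (the local path is hypothetical; the digest and size come from the pointer above):

```python
# Sketch: verify a downloaded Parquet shard against its Git LFS pointer.
import hashlib
from pathlib import Path

# Assumed local copy of the shard added in this commit.
data = Path("X-CODAH-nl/test-00000-of-00001.parquet").read_bytes()

assert len(data) == 182044  # "size" field of the pointer
assert hashlib.sha256(data).hexdigest() == (
    "d8619cac30d76b6959904cccf7c137237f879ab49abacb0c66dabfbf5040b8b8"
)  # "oid sha256" field of the pointer
```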
dataset_infos.json CHANGED
@@ -1553,53 +1553,42 @@
     "features": {
       "id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "lang": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question_tag": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question": {
         "stem": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "choices": {
           "feature": {
             "label": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
             "text": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             }
           },
-          "length": -1,
-          "id": null,
           "_type": "Sequence"
         }
       },
       "answerKey": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "xcsr",
+    "dataset_name": "xcsr",
     "config_name": "X-CODAH-nl",
     "version": {
       "version_str": "1.1.0",
@@ -1611,27 +1600,20 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 449014,
+        "num_bytes": 448728,
         "num_examples": 1000,
-        "dataset_name": "xcsr"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 130130,
+        "num_bytes": 130018,
         "num_examples": 300,
-        "dataset_name": "xcsr"
-      }
-    },
-    "download_checksums": {
-      "https://inklab.usc.edu/XCSR/xcsr_datasets.zip": {
-        "num_bytes": 7519903,
-        "checksum": "c45b29ece740643252d5402e76be1e33f96f9d6910053f79e80d39887f10c85e"
+        "dataset_name": null
       }
     },
-    "download_size": 7519903,
-    "post_processing_size": null,
-    "dataset_size": 579144,
-    "size_in_bytes": 8099047
+    "download_size": 237855,
+    "dataset_size": 578746,
+    "size_in_bytes": 816601
   },
   "X-CODAH-pl": {
     "description": "To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.\n",