---
annotations_creators:
- crowdsourced
language_creators:
- crowdsourced
- machine-generated
language:
- ar
- de
- en
- es
- fr
- hi
- it
- ja
- nl
- pl
- pt
- ru
- sw
- ur
- vi
- zh
license:
- mit
multilinguality:
- multilingual
size_categories:
- 1K<n<10K
source_datasets:
- extended|codah
- extended|commonsense_qa
task_categories:
- question-answering
task_ids:
- multiple-choice-qa
pretty_name: X-CSR
dataset_info:
- config_name: X-CODAH-ar
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 568026
num_examples: 1000
- name: validation
num_bytes: 165022
num_examples: 300
download_size: 265474
dataset_size: 733048
- config_name: X-CODAH-de
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 476087
num_examples: 1000
- name: validation
num_bytes: 138764
num_examples: 300
download_size: 259705
dataset_size: 614851
- config_name: X-CODAH-en
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 417000
num_examples: 1000
- name: validation
num_bytes: 121811
num_examples: 300
download_size: 217262
dataset_size: 538811
- config_name: X-CODAH-es
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 450954
num_examples: 1000
- name: validation
num_bytes: 130678
num_examples: 300
download_size: 242647
dataset_size: 581632
- config_name: X-CODAH-fr
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 477525
num_examples: 1000
- name: validation
num_bytes: 137889
num_examples: 300
download_size: 244998
dataset_size: 615414
- config_name: X-CODAH-hi
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 973733
num_examples: 1000
- name: validation
num_bytes: 283004
num_examples: 300
download_size: 336862
dataset_size: 1256737
- config_name: X-CODAH-it
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 457055
num_examples: 1000
- name: validation
num_bytes: 133504
num_examples: 300
download_size: 241780
dataset_size: 590559
- config_name: X-CODAH-jap
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 538415
num_examples: 1000
- name: validation
num_bytes: 157392
num_examples: 300
download_size: 264995
dataset_size: 695807
- config_name: X-CODAH-nl
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 448728
num_examples: 1000
- name: validation
num_bytes: 130018
num_examples: 300
download_size: 237855
dataset_size: 578746
- config_name: X-CODAH-pl
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 438538
num_examples: 1000
- name: validation
num_bytes: 127750
num_examples: 300
download_size: 254894
dataset_size: 566288
- config_name: X-CODAH-pt
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 455583
num_examples: 1000
- name: validation
num_bytes: 131933
num_examples: 300
download_size: 238858
dataset_size: 587516
- config_name: X-CODAH-ru
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 674567
num_examples: 1000
- name: validation
num_bytes: 193713
num_examples: 300
download_size: 314200
dataset_size: 868280
- config_name: X-CODAH-sw
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 423421
num_examples: 1000
- name: validation
num_bytes: 124770
num_examples: 300
download_size: 214100
dataset_size: 548191
- config_name: X-CODAH-ur
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 687123
num_examples: 1000
- name: validation
num_bytes: 199737
num_examples: 300
download_size: 294475
dataset_size: 886860
- config_name: X-CODAH-vi
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 543089
num_examples: 1000
- name: validation
num_bytes: 156888
num_examples: 300
download_size: 251390
dataset_size: 699977
- config_name: X-CODAH-zh
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question_tag
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 394660
num_examples: 1000
- name: validation
num_bytes: 115025
num_examples: 300
download_size: 237827
dataset_size: 509685
- config_name: X-CSQA-ar
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 288645
num_examples: 1074
- name: validation
num_bytes: 273580
num_examples: 1000
download_size: 255626
dataset_size: 562225
- config_name: X-CSQA-de
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 234170
num_examples: 1074
- name: validation
num_bytes: 222840
num_examples: 1000
download_size: 242762
dataset_size: 457010
- config_name: X-CSQA-en
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 215617
num_examples: 1074
- name: validation
num_bytes: 205079
num_examples: 1000
download_size: 222677
dataset_size: 420696
- config_name: X-CSQA-es
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 236817
num_examples: 1074
- name: validation
num_bytes: 224497
num_examples: 1000
download_size: 238810
dataset_size: 461314
- config_name: X-CSQA-fr
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 243952
num_examples: 1074
- name: validation
num_bytes: 231396
num_examples: 1000
download_size: 244676
dataset_size: 475348
- config_name: X-CSQA-hi
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 415011
num_examples: 1074
- name: validation
num_bytes: 396318
num_examples: 1000
download_size: 304090
dataset_size: 811329
- config_name: X-CSQA-it
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 232604
num_examples: 1074
- name: validation
num_bytes: 220902
num_examples: 1000
download_size: 236130
dataset_size: 453506
- config_name: X-CSQA-jap
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 250846
num_examples: 1074
- name: validation
num_bytes: 240404
num_examples: 1000
download_size: 249420
dataset_size: 491250
- config_name: X-CSQA-nl
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 226949
num_examples: 1074
- name: validation
num_bytes: 216194
num_examples: 1000
download_size: 231078
dataset_size: 443143
- config_name: X-CSQA-pl
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 231479
num_examples: 1074
- name: validation
num_bytes: 219814
num_examples: 1000
download_size: 245829
dataset_size: 451293
- config_name: X-CSQA-pt
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 235469
num_examples: 1074
- name: validation
num_bytes: 222785
num_examples: 1000
download_size: 238902
dataset_size: 458254
- config_name: X-CSQA-ru
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 341749
num_examples: 1074
- name: validation
num_bytes: 323724
num_examples: 1000
download_size: 296252
dataset_size: 665473
- config_name: X-CSQA-sw
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 222215
num_examples: 1074
- name: validation
num_bytes: 211426
num_examples: 1000
download_size: 214954
dataset_size: 433641
- config_name: X-CSQA-ur
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 306129
num_examples: 1074
- name: validation
num_bytes: 292001
num_examples: 1000
download_size: 267789
dataset_size: 598130
- config_name: X-CSQA-vi
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 265210
num_examples: 1074
- name: validation
num_bytes: 253502
num_examples: 1000
download_size: 244641
dataset_size: 518712
- config_name: X-CSQA-zh
features:
- name: id
dtype: string
- name: lang
dtype: string
- name: question
struct:
- name: stem
dtype: string
- name: choices
sequence:
- name: label
dtype: string
- name: text
dtype: string
- name: answerKey
dtype: string
splits:
- name: test
num_bytes: 197444
num_examples: 1074
- name: validation
num_bytes: 188273
num_examples: 1000
download_size: 207379
dataset_size: 385717
configs:
- config_name: X-CODAH-ar
data_files:
- split: test
path: X-CODAH-ar/test-*
- split: validation
path: X-CODAH-ar/validation-*
- config_name: X-CODAH-de
data_files:
- split: test
path: X-CODAH-de/test-*
- split: validation
path: X-CODAH-de/validation-*
- config_name: X-CODAH-en
data_files:
- split: test
path: X-CODAH-en/test-*
- split: validation
path: X-CODAH-en/validation-*
- config_name: X-CODAH-es
data_files:
- split: test
path: X-CODAH-es/test-*
- split: validation
path: X-CODAH-es/validation-*
- config_name: X-CODAH-fr
data_files:
- split: test
path: X-CODAH-fr/test-*
- split: validation
path: X-CODAH-fr/validation-*
- config_name: X-CODAH-hi
data_files:
- split: test
path: X-CODAH-hi/test-*
- split: validation
path: X-CODAH-hi/validation-*
- config_name: X-CODAH-it
data_files:
- split: test
path: X-CODAH-it/test-*
- split: validation
path: X-CODAH-it/validation-*
- config_name: X-CODAH-jap
data_files:
- split: test
path: X-CODAH-jap/test-*
- split: validation
path: X-CODAH-jap/validation-*
- config_name: X-CODAH-nl
data_files:
- split: test
path: X-CODAH-nl/test-*
- split: validation
path: X-CODAH-nl/validation-*
- config_name: X-CODAH-pl
data_files:
- split: test
path: X-CODAH-pl/test-*
- split: validation
path: X-CODAH-pl/validation-*
- config_name: X-CODAH-pt
data_files:
- split: test
path: X-CODAH-pt/test-*
- split: validation
path: X-CODAH-pt/validation-*
- config_name: X-CODAH-ru
data_files:
- split: test
path: X-CODAH-ru/test-*
- split: validation
path: X-CODAH-ru/validation-*
- config_name: X-CODAH-sw
data_files:
- split: test
path: X-CODAH-sw/test-*
- split: validation
path: X-CODAH-sw/validation-*
- config_name: X-CODAH-ur
data_files:
- split: test
path: X-CODAH-ur/test-*
- split: validation
path: X-CODAH-ur/validation-*
- config_name: X-CODAH-vi
data_files:
- split: test
path: X-CODAH-vi/test-*
- split: validation
path: X-CODAH-vi/validation-*
- config_name: X-CODAH-zh
data_files:
- split: test
path: X-CODAH-zh/test-*
- split: validation
path: X-CODAH-zh/validation-*
- config_name: X-CSQA-ar
data_files:
- split: test
path: X-CSQA-ar/test-*
- split: validation
path: X-CSQA-ar/validation-*
- config_name: X-CSQA-de
data_files:
- split: test
path: X-CSQA-de/test-*
- split: validation
path: X-CSQA-de/validation-*
- config_name: X-CSQA-en
data_files:
- split: test
path: X-CSQA-en/test-*
- split: validation
path: X-CSQA-en/validation-*
- config_name: X-CSQA-es
data_files:
- split: test
path: X-CSQA-es/test-*
- split: validation
path: X-CSQA-es/validation-*
- config_name: X-CSQA-fr
data_files:
- split: test
path: X-CSQA-fr/test-*
- split: validation
path: X-CSQA-fr/validation-*
- config_name: X-CSQA-hi
data_files:
- split: test
path: X-CSQA-hi/test-*
- split: validation
path: X-CSQA-hi/validation-*
- config_name: X-CSQA-it
data_files:
- split: test
path: X-CSQA-it/test-*
- split: validation
path: X-CSQA-it/validation-*
- config_name: X-CSQA-jap
data_files:
- split: test
path: X-CSQA-jap/test-*
- split: validation
path: X-CSQA-jap/validation-*
- config_name: X-CSQA-nl
data_files:
- split: test
path: X-CSQA-nl/test-*
- split: validation
path: X-CSQA-nl/validation-*
- config_name: X-CSQA-pl
data_files:
- split: test
path: X-CSQA-pl/test-*
- split: validation
path: X-CSQA-pl/validation-*
- config_name: X-CSQA-pt
data_files:
- split: test
path: X-CSQA-pt/test-*
- split: validation
path: X-CSQA-pt/validation-*
- config_name: X-CSQA-ru
data_files:
- split: test
path: X-CSQA-ru/test-*
- split: validation
path: X-CSQA-ru/validation-*
- config_name: X-CSQA-sw
data_files:
- split: test
path: X-CSQA-sw/test-*
- split: validation
path: X-CSQA-sw/validation-*
- config_name: X-CSQA-ur
data_files:
- split: test
path: X-CSQA-ur/test-*
- split: validation
path: X-CSQA-ur/validation-*
- config_name: X-CSQA-vi
data_files:
- split: test
path: X-CSQA-vi/test-*
- split: validation
path: X-CSQA-vi/validation-*
- config_name: X-CSQA-zh
data_files:
- split: test
path: X-CSQA-zh/test-*
- split: validation
path: X-CSQA-zh/validation-*
---
# Dataset Card for X-CSR
## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
## Dataset Description
- **Homepage:** https://inklab.usc.edu//XCSR/
- **Repository:** https://github.com/INK-USC/XCSR
- **Paper:** https://arxiv.org/abs/2106.06937
- **Leaderboard:** https://inklab.usc.edu//XCSR/leaderboard
- **Point of Contact:** https://yuchenlin.xyz/
### Dataset Summary
To evaluate multilingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and testing in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, into 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs under a unified evaluation protocol for X-CSR, we argue that such translated examples, although they might contain noise, can serve as a starting benchmark for obtaining meaningful analysis until human-translated datasets become available.
### Supported Tasks and Leaderboards
The official X-CSR leaderboard is available at https://inklab.usc.edu//XCSR/leaderboard.
### Languages
X-CSR covers 16 languages in total: {en, zh, de, es, fr, it, jap, nl, pl, pt, ru, ar, vi, hi, sw, ur}. Note that the configuration names use `jap` for Japanese.
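A single language-specific configuration can be loaded with the `datasets` library; below is a minimal sketch, assuming the dataset id `xcsr` (taken from this repository) and the config names listed in the YAML metadata above:
```python
from datasets import load_dataset

# Load the English X-CSQA configuration; other languages follow the same
# naming scheme, e.g. "X-CSQA-fr" or "X-CODAH-zh".
xcsqa_en = load_dataset("xcsr", "X-CSQA-en")  # dataset id "xcsr" is an assumption

print(xcsqa_en)                   # splits: validation (1,000) and test (1,074 examples)
print(xcsqa_en["validation"][0])  # one multiple-choice example
```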
## Dataset Structure
### Data Instances
An example of the X-CSQA dataset:
```
{
  "id": "be1920f7ba5454ad", # an id shared by all languages
  "lang": "en", # one of the 16 language codes.
  "question": {
    "stem": "What will happen to your knowledge with more learning?", # question text
    "choices": [
      {"label": "A", "text": "headaches"},
      {"label": "B", "text": "bigger brain"},
      {"label": "C", "text": "education"},
      {"label": "D", "text": "growth"},
      {"label": "E", "text": "knowing more"}
    ]
  },
  "answerKey": "D" # hidden for test data.
}
```
An example of the X-CODAH dataset:
```
{
  "id": "b8eeef4a823fcd4b", # an id shared by all languages
  "lang": "en", # one of the 16 language codes.
  "question_tag": "o", # one of 6 question types
  "question": {
    "stem": " ", # always a blank as a dummy question
    "choices": [
      {"label": "A",
       "text": "Jennifer loves her school very much, she plans to drop every courses."},
      {"label": "B",
       "text": "Jennifer loves her school very much, she is never absent even when she's sick."},
      {"label": "C",
       "text": "Jennifer loves her school very much, she wants to get a part-time job."},
      {"label": "D",
       "text": "Jennifer loves her school very much, she quits school happily."}
    ]
  },
  "answerKey": "B" # hidden for test data.
}
```
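Because the X-CODAH stem is a blank dummy string, each choice is effectively a complete sentence on its own. A common way to prepare such an instance for a multiple-choice scorer is to pair the stem with every choice; the helper below is a generic sketch over the raw JSON structure shown above, not the exact input format used in the paper:
```python
def to_choice_inputs(example: dict) -> list:
    """Build one input string per candidate answer by joining the stem with each choice text."""
    stem = example["question"]["stem"].strip()
    return [
        f"{stem} {choice['text']}".strip()
        for choice in example["question"]["choices"]
    ]

# For the X-CODAH example above, this yields the four "Jennifer loves her school..." sentences.
```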
### Data Fields
- id: an id shared by all languages
- lang: one of the 16 language codes
- question_tag: one of 6 question types (X-CODAH only)
- question: a struct containing:
  - stem: the question text (always a blank dummy string in X-CODAH)
  - choices: a list of candidate answers, each with:
    - label: a string identifier for the answer (e.g., "A")
    - text: the answer text
- answerKey: the label of the correct answer (hidden for test data)
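To tie these fields together, the hypothetical helper below resolves the gold answer text from an instance shaped like the raw JSON examples above (when loaded through the `datasets` library, `choices` may instead be exposed as parallel `label`/`text` lists, so adapt accordingly):
```python
from typing import Optional

def gold_answer_text(example: dict) -> Optional[str]:
    """Return the text of the choice named by `answerKey`, or None when the key is hidden (test split)."""
    answer_key = example.get("answerKey")
    if not answer_key:
        return None
    for choice in example["question"]["choices"]:
        if choice["label"] == answer_key:
            return choice["text"]
    return None

# For the X-CSQA example above, gold_answer_text(example) returns "growth".
```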
### Data Splits
- X-CSQA: There are 8,888 examples for training in English, 1,000 for development in each language, and 1,074 examples for testing in each language.
- X-CODAH: There are 8,476 examples for training in English, 300 for development in each language, and 1,000 examples for testing in each language.
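Under the X-CSR protocol a model is fine-tuned on the English training data only and then evaluated on every language's development and test splits. The loop below is a minimal sketch of iterating over all 16 language configurations (again assuming the dataset id `xcsr`):
```python
from datasets import load_dataset

# Language codes as they appear in the configuration names (note "jap" for Japanese).
LANGS = ["en", "zh", "de", "es", "fr", "it", "jap", "nl",
         "pl", "pt", "ru", "ar", "vi", "hi", "sw", "ur"]

for lang in LANGS:
    # An English-trained model would be scored on each of these splits
    # in a cross-lingual zero-shot fashion.
    ds = load_dataset("xcsr", f"X-CSQA-{lang}")  # dataset id "xcsr" is an assumption
    print(lang, len(ds["validation"]), len(ds["test"]))  # expected: 1000 and 1074
```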
## Dataset Creation
### Curation Rationale
To evaluate multilingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and testing in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH.
The details of the dataset construction, especially the translation procedures, can be found in section A of the appendix of the [paper](https://inklab.usc.edu//XCSR/XCSR_paper.pdf).
### Source Data
#### Initial Data Collection and Normalization
[Needs More Information]
#### Who are the source language producers?
[Needs More Information]
### Annotations
#### Annotation process
[Needs More Information]
#### Who are the annotators?
[Needs More Information]
### Personal and Sensitive Information
[Needs More Information]
## Considerations for Using the Data
### Social Impact of Dataset
[Needs More Information]
### Discussion of Biases
[Needs More Information]
### Other Known Limitations
[Needs More Information]
## Additional Information
### Dataset Curators
[Needs More Information]
### Licensing Information
[Needs More Information]
### Citation Information
```
# X-CSR
@inproceedings{lin-etal-2021-common,
title = "Common Sense Beyond {E}nglish: Evaluating and Improving Multilingual Language Models for Commonsense Reasoning",
author = "Lin, Bill Yuchen and
Lee, Seyeon and
Qiao, Xiaoyang and
Ren, Xiang",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.acl-long.102",
doi = "10.18653/v1/2021.acl-long.102",
pages = "1274--1287",
abstract = "Commonsense reasoning research has so far been limited to English. We aim to evaluate and improve popular multilingual language models (ML-LMs) to help advance commonsense reasoning (CSR) beyond English. We collect the Mickey corpus, consisting of 561k sentences in 11 different languages, which can be used for analyzing and improving ML-LMs. We propose Mickey Probe, a language-general probing task for fairly evaluating the common sense of popular ML-LMs across different languages. In addition, we also create two new datasets, X-CSQA and X-CODAH, by translating their English versions to 14 other languages, so that we can evaluate popular ML-LMs for cross-lingual commonsense reasoning. To improve the performance beyond English, we propose a simple yet effective method {---} multilingual contrastive pretraining (MCP). It significantly enhances sentence representations, yielding a large performance gain on both benchmarks (e.g., +2.7{\%} accuracy for X-CSQA over XLM-R{\_}L).",
}
# CSQA
@inproceedings{Talmor2019commonsenseqaaq,
address = {Minneapolis, Minnesota},
author = {Talmor, Alon and Herzig, Jonathan and Lourie, Nicholas and Berant, Jonathan},
booktitle = {Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
doi = {10.18653/v1/N19-1421},
pages = {4149--4158},
publisher = {Association for Computational Linguistics},
title = {CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge},
url = {https://www.aclweb.org/anthology/N19-1421},
year = {2019}
}
# CODAH
@inproceedings{Chen2019CODAHAA,
address = {Minneapolis, USA},
author = {Chen, Michael and D{'}Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},
booktitle = {Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for {NLP}},
doi = {10.18653/v1/W19-2008},
pages = {63--69},
publisher = {Association for Computational Linguistics},
title = {CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},
url = {https://www.aclweb.org/anthology/W19-2008},
year = {2019}
}
```
### Contributions
Thanks to [Bill Yuchen Lin](https://yuchenlin.xyz/), [Seyeon Lee](https://seyeon-lee.github.io/), [Xiaoyang Qiao](https://www.linkedin.com/in/xiaoyang-qiao/), [Xiang Ren](http://www-bcf.usc.edu/~xiangren/) for adding this dataset.