Datasets:
Tasks:
Question Answering
Modalities:
Text
Formats:
parquet
Sub-tasks:
open-domain-qa
Languages:
English
Size:
10K - 100K
ArXiv:
License:
File size: 3,640 Bytes
d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 c1a20f1 d902145 c1a20f1 d902145 c1a20f1 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 d902145 e7ccf29 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 |
"""CommonsenseQA dataset."""
import json
import datasets
# Official project page for CommonsenseQA.
_HOMEPAGE = "https://www.tau-nlp.org/commonsenseqa"

# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
CommonsenseQA is a new multiple-choice question answering dataset that requires different types of commonsense knowledge
to predict the correct answers . It contains 12,102 questions with one correct answer and four distractor answers.
The dataset is provided in two major training/validation/testing set splits: "Random split" which is the main evaluation
split, and "Question token split", see paper for details.
"""

# BibTeX entry for the NAACL 2019 paper introducing the dataset.
_CITATION = """\
@inproceedings{talmor-etal-2019-commonsenseqa,
title = "{C}ommonsense{QA}: A Question Answering Challenge Targeting Commonsense Knowledge",
author = "Talmor, Alon and
Herzig, Jonathan and
Lourie, Nicholas and
Berant, Jonathan",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N19-1421",
doi = "10.18653/v1/N19-1421",
pages = "4149--4158",
archivePrefix = "arXiv",
eprint = "1811.00937",
primaryClass = "cs",
}
"""

# Base S3 bucket for the raw data files.
# NOTE(review): "commensenseqa" looks misspelled but appears to match the
# actual upstream bucket name — do NOT "correct" it without verifying the
# download still works.
_URL = "https://s3.amazonaws.com/commensenseqa"

# One JSON-lines file per split; the test file ships without answer keys.
_URLS = {
    "train": f"{_URL}/train_rand_split.jsonl",
    "validation": f"{_URL}/dev_rand_split.jsonl",
    "test": f"{_URL}/test_rand_split_no_answers.jsonl",
}
class CommonsenseQa(datasets.GeneratorBasedBuilder):
    """Builder for the CommonsenseQA multiple-choice QA dataset.

    Downloads the three JSON-lines split files and yields one example per
    line with the question stem, its five answer choices, and the gold
    answer key (empty string for the unlabeled test split).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset schema plus homepage and citation metadata."""
        choice_features = datasets.features.Sequence(
            {
                "label": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "question_concept": datasets.Value("string"),
                    "choices": choice_features,
                    "answerKey": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every split file and wire each to a SplitGenerator."""
        downloaded = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": downloaded[split_name],
                },
            )
            for split_name in (
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            )
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs parsed from one JSON-lines file."""
        with open(filepath, encoding="utf-8") as handle:
            for index, line in enumerate(handle):
                record = json.loads(line)
                question = record["question"]
                answer_choices = question["choices"]
                yield index, {
                    "id": record["id"],
                    "question": question["stem"],
                    "question_concept": question["question_concept"],
                    "choices": {
                        "label": [choice["label"] for choice in answer_choices],
                        "text": [choice["text"] for choice in answer_choices],
                    },
                    # The test split ships without gold answers; default to "".
                    "answerKey": record.get("answerKey", ""),
                }
|