FairytaleQA / FairytaleQA.json
{
"overview": {
"what": {
"dataset": "The FairytaleQA Dataset is an English-language dataset focusing on narrative comprehension of kindergarten to eighth-grade students. Generated by educational experts based on an evidence-based theoretical framework, FairytaleQA consists of 10,580 explicit and implicit questions derived from 278 children-friendly stories, covering seven types of narrative elements or relations. The Dataset was corrected to support both the tasks of Question Generation and Question Answering."
},
"where": {
"has-leaderboard": "yes",
"leaderboard-url": "[PapersWithCode](https://paperswithcode.com/sota/question-generation-on-fairytaleqa)",
"leaderboard-description": "The task was to generate questions corresponding to the given answers and the story context. Success on the Question Generation task is typically measured by achieving a high ROUGE-L score to the reference ground-truth question.",
"data-url": "[Github](https://github.com/uci-soe/FairytaleQAData)",
"paper-url": "[ArXiv](https://arxiv.org/abs/2203.13947)",
"paper-bibtext": "@inproceedings{xu2022fairytaleqa,\n author={Xu, Ying and Wang, Dakuo and Yu, Mo and Ritchie, Daniel and Yao, Bingsheng and Wu, Tongshuang and Zhang, Zheng and Li, Toby Jia-Jun and Bradford, Nora and Sun, Branda and Hoang, Tran Bao and Sang, Yisi and Hou, Yufang and Ma, Xiaojuan and Yang, Diyi and Peng, Nanyun and Yu, Zhou and Warschauer, Mark},\n title = {Fantastic Questions and Where to Find Them: Fairytale{QA} -- An Authentic Dataset for Narrative Comprehension},\n publisher = {Association for Computational Linguistics},\n year = {2022}\n}",
"contact-name": "Ying Xu, Dakuo Wang",
"contact-email": "ying.xu@uci.edu, dakuo.wang@ibm.com"
},
"languages": {
"is-multilingual": "no",
"license": "unknown: License information unavailable",
"task-other": "N/A",
"language-names": [
"English"
],
"language-dialects": "[N/A]",
"intended-use": "The purpose of this dataset is to help develop systems to facilitate assessment and training of narrative comprehension skills for children in education domain. The dataset distinguishes fine-grained reading skills, such as the understanding of varying narrative elements, and contains high-quality QA-pairs generated by education experts with sufficient training and education domain knowledge to create valid QA-pairs in a consistent way. \n\nThis dataset is suitable for developing models to automatically generate questions and QA-Pairs that satisfy the need for a continuous supply of new questions, which can potentially enable large-scale development of AI-supported interactive platforms for the learning and assessment of reading comprehension skills.",
"language-speakers": "[N/A]",
"license-other": "N/A",
"task": "Question Generation",
"communicative": "The task was to generate questions corresponding to the given answers and the story context. Models trained for this task can potentially enable large-scale development of AI-supported interactive platforms for the learning and assessment of reading comprehension skills."
},
"credit": {
"organization-type": [
"academic"
],
"organization-names": "University of California Irvine",
"creators": "Ying Xu (University of California Irvine); Dakuo Wang (IBM Research); Mo Yu (IBM Research); Daniel Ritchie (University of California Irvine); Bingsheng Yao (Rensselaer Polytechnic Institute); Tongshuang Wu (University of Washington); Zheng Zhang (University of Notre Dame); Toby Jia-Jun Li (University of Notre Dame); Nora Bradford (University of California Irvine); Branda Sun (University of California Irvine); Tran Bao Hoang (University of California Irvine); Yisi Sang (Syracuse University); Yufang Hou (IBM Research Ireland); Xiaojuan Ma (Hong Kong Univ. of Sci and Tech); Diyi Yang (Georgia Institute of Technology); Nanyun Peng (University of California Los Angeles); Zhou Yu (Columbia University); Mark Warschauer (University of California Irvine)",
"funding": "Schmidt Futures",
"gem-added-by": "Dakuo Wang (IBM Research); Bingsheng Yao (Rensselaer Polytechnic Institute); Ying Xu (University of California Irvine)"
},
"structure": {
"data-fields": "- `story_name`: a string of the story name to which the story section content belongs. Full story data can be found [here](https://github.com/uci-soe/FairytaleQAData).\n\n- `content`: a string of the story section(s) content related to the experts' labeled QA-pair. Used as the input for both Question Generation and Question Answering tasks. \n\n- `question`: a string of the question content. Used as the input for Question Answering task and as the output for Question Generation task. \n\n- `answer`: a string of the answer content for all splits. Used as the input for Question Generation task and as the output for Question Answering task.\n\n- `gem_id`: a string of id follows GEM naming convention ```GEM-${DATASET_NAME}-${SPLIT-NAME}-${id}``` where id is an incrementing number starting at 1\n\n- `target`: a string of the question content being used for training\n\n- `references`: a list of string containing the question content being used for automatic eval\n\n- `local_or_sum`: a string of either local or summary, indicating whether the QA is related to one story section or multiple sections \n\n- `attribute`: a string of one of character, causal relationship, action, setting, feeling, prediction, or outcome resolution. Classification of the QA by education experts annotators via 7 narrative elements on an established framework\n \n- `ex_or_im`: a string of either explicit or implicit, indicating whether the answers can be directly found in the story content or cannot be directly from the story content.\n",
"structure-example": "{'story_name': 'self-did-it', \n'content': '\" what is your name ? \" asked the girl from underground . \" self is my name , \" said the woman . that seemed a curious name to the girl , and she once more began to pull the fire apart . then the woman grew angry and began to scold , and built it all up again . thus they went on for a good while ; but at last , while they were in the midst of their pulling apart and building up of the fire , the woman upset the tar - barrel on the girl from underground . then the latter screamed and ran away , crying : \" father , father ! self burned me ! \" \" nonsense , if self did it , then self must suffer for it ! \" came the answer from below the hill .', \n'answer': 'the woman told the girl her name was self .', \n'question': \"why did the girl's father think the girl burned herself ?\", \n'gem_id': 'GEM-FairytaleQA-test-1006', \n'target': \"why did the girl's father think the girl burned herself ?\", \n'references': [\"why did the girl's father think the girl burned herself ?\"], \n'local_or_sum': 'local', \n'attribute': 'causal relationship', \n'ex_or_im': 'implicit'}",
"structure-splits": "The data is split into a train, validation, and test split randomly. The final split sizes are as follows:\n\n| | Train | Validation | Test |\n| ----- | ----- | ----- | ----- |\n| # Books | 232 | 23 | 23 |\n| # QA-Pairs | 8548 | 1025 |1007 |",
"structure-splits-criteria": "The books are randomly split into train/validation/test splits. We control the ratio of QA-pair numbers in train:validation:test splits close to 8:1:1",
"structure-outlier": "[N/A]",
"structure-labels": "A typical data point comprises a question, the corresponding story content, and one answer. Education expert annotators labeled whether the answer is locally relevant to one story section or requires summarization capabilities from multiple story sections, and whether the answers are explicit (can be directly found in the stories) or implicit (cannot be directly found in the story text). Additionally, education expert annotators categorize the QA-pairs via 7 narrative elements from an establish framework.",
"structure-description": "[N/A]"
}
},
"curation": {
"original": {
"is-aggregated": "no",
"aggregated-sources": "N/A",
"rationale": "FairytaleQA was built to focus on comprehension of narratives in the education domain, targeting students from kindergarten to eighth grade. We focus on narrative comprehension for 1. it is a high-level comprehension skill strongly predictive of reading achievement and plays a central role in daily life as people frequently encounter narratives in different forms, 2. narrative stories have a clear structure of specific elements and relations among these elements, and there are existing validated narrative comprehension frameworks around this structure, which provides a basis for developing the annotation schema for our dataset.",
"communicative": "The purpose of this dataset is to help develop systems to facilitate assessment and training of narrative comprehension skills for children in education domain."
},
"language": {
"found": [
"Single website"
],
"crowdsourced": [],
"created": "N/A",
"machine-generated": "N/A",
"validated": "validated by data curator",
"is-filtered": "manually",
"filtered-criteria": "For each story, we evaluated the reading difficulty level using the [textstat](https://pypi.org/project/textstat/) Python package, primarily based on sentence length, word length, and commonness of words. We excluded stories that are at 10th grade level or above.",
"obtained": [
"Found"
],
"producers-description": "The fairytale story texts are from the [Project Gutenberg](https://www.gutenberg.org/) website",
"topics": "We gathered the text from the Project Gutenberg website, using \u201cfairytale\u201d as the search term. ",
"pre-processed": "Due to a large number of fairytales found, we used the most popular stories based on the number of downloads since these stories are presumably of higher quality. To ensure the readability of the text, we made a small number of minor revisions to some obviously outdated vocabulary (e.g., changing \u201cere\u201d to \u201cbefore\u201d) and the unconventional use of punctuation (e.g., changing consecutive semi-colons to periods). \n\nThese texts were broken down into small sections based on their semantic content by our annotators. The annotators were instructed to split the story into sections of 100-300 words that also contain meaningful content and are separated at natural story breaks. An initial annotator would split the story, and this would be reviewed by a cross-checking annotator. Most of the resulting sections were one natural paragraph of the original text."
},
"annotations": {
"origin": "expert created",
"rater-number": "2<n<10",
"rater-qualifications": "All of these annotators have a B.A. degree in education, psychology, or cognitive science and have substantial experience in teaching and reading assessment. These annotators were supervised by three experts in literacy education.",
"rater-training-num": "2",
"rater-test-num": "3",
"rater-annotation-service-bool": "no",
"rater-annotation-service": [],
"values": "The dataset annotation distinguishes fine-grained reading skills, such as the understanding of varying narrative elements, and contains high-quality QA-pairs generated by education experts with sufficient training and education domain knowledge to create valid QA-pairs in a consistent way.",
"quality-control": "validated by data curators",
"quality-control-details": "The annotators were instructed to imagine that they were creating questions to test elementary or middle school students in the process of reading a complete story. We required the annotators to generate only natural, open-ended questions, avoiding \u201cyes-\u201d or \u201cno-\u201d questions. We also instructed them to provide a diverse set of questions about 7 different narrative elements, and with both implicit and explicit questions. \n\nWe asked the annotators to also generate answers for each of their questions. We asked them to provide the shortest possible answers but did not restrict them to complete sentences or short phrases. We also asked the annotators to label which section(s) the question and answer was from.\n\nAll annotators received a two-week training in which each of them was familiarized with the coding template and conducted practice coding on the same five stories. The practice QA pairs were then reviewed by the other annotators and the three experts, and discrepancies among annotators were discussed. During the annotation process, the team met once every week to review and discuss each member\u2019s work. All QA pairs were cross-checked by two annotators, and 10% of the QA pairs were additionally checked by the expert supervisor.\n\nFor the 46 stories used as the evaluation set, we annotate a second reference answer by asking an annotator to independently read the story and answer the questions generated by others."
},
"consent": {
"has-consent": "yes",
"consent-policy": "During the annotation process, the team met once every week to review and discuss each member\u2019s work. All QA pairs were cross-checked by two annotators, and 10% of the QA pairs were additionally checked by the expert supervisor.",
"consent-other": "Aside from Question Generation task, the data creators and curators used this data for Question Answering, and QA-Pair Generation tasks, and to identify social stereotypes represented in story narratives.",
"no-consent-justification": "N/A"
},
"pii": {
"has-pii": "no PII",
"no-pii-justification": "The story content is from publically available knowledge website and the annotated QA-pairs are about general knowledge to the story content without references to the author or to any persons",
"is-pii-identified": "N/A",
"pii-identified-method": "N/A",
"is-pii-replaced": "N/A",
"pii-replaced-method": "N/A",
"pii-categories": []
},
"maintenance": {
"has-maintenance": "yes",
"description": "We plan to host various splits for the FairytaleQA dataset to better serve various types of research interests. We have the original data for 2 different split approaches including train/validation/test splits and split by fairytale origins. We are also plan to host the dataset on multiple platforms for various tasks. ",
"contact": "Daniel Ritchie",
"contestation-mechanism": "no mechanism",
"contestation-link": "N/A",
"contestation-description": "N/A"
}
},
"gem": {
"rationale": {
"sole-task-dataset": "no",
"sole-language-task-dataset": "N/A",
"distinction-description": "N/A",
"contribution": "The dataset distinguishes fine-grained reading skills, such as the understanding of varying narrative elements, and contains high-quality QA-pairs generated by education experts with sufficient training and education domain knowledge to create valid QA-pairs in a consistent way.\n\n",
"model-ability": "This dataset is suitable for developing models to automatically generate questions or QA-pairs that satisfy the need for a continuous supply of new questions, which can potentially enable large-scale development of AI-supported interactive platforms for the learning and assessment of reading comprehension skills."
},
"curation": {
"has-additional-curation": "yes",
"modification-types": [
"data points removed"
],
"modification-description": "The original data contains two answers by different annotators in validation/test splits, we removed the 2nd answer for GEM version because it is not being used for the Question Generation task. ",
"has-additional-splits": "no",
"additional-splits-description": "N/A",
"additional-splits-capacicites": "N/A"
},
"starting": {
"research-pointers": "[N/A]"
}
},
"results": {
"results": {
"other-metrics-definitions": "N/A",
"has-previous-results": "yes",
"current-evaluation": "N/A",
"previous-results": "A [BART-based model](https://huggingface.co/facebook/bart-large) currently achieves a [ROUGE-L of 0.527/0.527](https://github.com/uci-soe/FairytaleQAData) on valid/test splits, which is reported as the baseline experiment for the dataset [paper](https://arxiv.org/pdf/2203.13947.pdf).",
"metrics": [
"ROUGE"
],
"model-abilities": "We are able to measure model's capabilities of generating various types of questions that corresponds to different narrative elements with the FairytaleQA dataset on the Question Generation Task",
"original-evaluation": "The task was to generate questions corresponding to the given answers and the story context. Success on this task is typically measured by achieving a high [ROUGE](https://huggingface.co/metrics/rouge) score to the reference ground-truth questions. "
}
},
"considerations": {
"pii": {
"risks-description": "[N/A]"
},
"licenses": {
"dataset-restrictions-other": "N/A",
"data-copyright-other": "N/A",
"dataset-restrictions": [
"research use only"
],
"data-copyright": [
"public domain"
]
},
"limitations": {
"data-technical-limitations": "We noticed that human results are obtained via cross-estimation between the two annotated answers, thus are underestimated. One possibility for future work is to conduct a large-scale human annotation to collect more answers per question and then leverage the massively annotated answers to better establish a human performance evaluation.",
"data-unsuited-applications": "The QA-pairs annotated by education experts are targeting the audience of children from kindergarten to eighth grade, so the difficulty of QA-pairs are not suitable to compare with other existing dataset that are sourced from knowledge graphs or knowledge bases like Wikipedia.",
"data-discouraged-use": "[N/A]"
}
},
"context": {
"previous": {
"is-deployed": "yes - models trained on this dataset",
"described-risks": "[N/A]",
"changes-from-observation": "[N/A]"
},
"underserved": {
"helps-underserved": "yes",
"underserved-description": "From the educational perspective, given that reading comprehension is a multicomponent skill, it is ideal for comprehension questions to be able to identify students\u2019 performance in specific sub-skills, thus allowing teachers to provide tailored guidance. "
},
"biases": {
"has-biases": "unsure",
"bias-analyses": "N/A",
"speaker-distibution": "[N/A]"
}
}
}
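The field descriptions in `data-fields` map directly onto loaded examples. Below is a minimal sketch of loading and inspecting the data, assuming the dataset is published on the Hugging Face Hub under the repository name `GEM/FairytaleQA` (the exact Hub name is an assumption):

```python
from datasets import load_dataset

# Assumed Hub repository name; adjust to wherever the GEM release is hosted.
dataset = load_dataset("GEM/FairytaleQA")

example = dataset["test"][0]
print(example["story_name"])    # e.g. "self-did-it"
print(example["content"])       # story section(s) the QA-pair is grounded in
print(example["question"])      # QG output / QA input
print(example["answer"])        # QG input / QA output
print(example["gem_id"])        # e.g. "GEM-FairytaleQA-test-1006"
print(example["local_or_sum"], example["attribute"], example["ex_or_im"])
```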
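The `filtered-criteria` entry describes excluding stories at a 10th-grade reading level or above using the [textstat](https://pypi.org/project/textstat/) package. A minimal sketch of such a filter, assuming the Flesch-Kincaid grade metric (the card does not name the specific textstat function the curators used):

```python
import textstat

def keep_story(text: str, max_grade: float = 10.0) -> bool:
    """Keep stories below a 10th-grade reading level (assumed metric)."""
    return textstat.flesch_kincaid_grade(text) < max_grade

# Toy corpus for illustration; in practice, iterate over the candidate stories.
stories = {
    "self-did-it": '" what is your name ? " asked the girl from underground ...',
}
readable = {name: text for name, text in stories.items() if keep_story(text)}
```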
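The `original-evaluation` and `previous-results` entries describe scoring generated questions against the reference ground-truth questions with ROUGE-L. A minimal sketch using the `rouge_score` package; the toy prediction is illustrative only, and the exact aggregation used by the baseline may differ:

```python
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)

# Toy reference/prediction pair; in practice, iterate over the test split.
reference = "why did the girl's father think the girl burned herself ?"
prediction = "why did the father believe the girl had burned herself ?"

# score(target, prediction) returns precision/recall/F1 per requested metric.
rouge_l = scorer.score(reference, prediction)["rougeL"].fmeasure
print(f"ROUGE-L F1: {rouge_l:.3f}")
```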
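The `previous-results` entry reports a BART-based baseline for Question Generation. The sketch below shows only the input/output framing (answer plus story section in, question out); the separator-joined input format is an assumption, and the baseline's exact preprocessing and fine-tuning setup are described in the paper:

```python
from transformers import BartForConditionalGeneration, BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large")

answer = "the woman told the girl her name was self ."
context = '" what is your name ? " asked the girl from underground . ...'

# Assumed input framing: answer and story section joined by a separator token.
inputs = tokenizer(f"{answer} </s> {context}", return_tensors="pt",
                   truncation=True, max_length=1024)

# An un-fine-tuned checkpoint will not produce good questions; fine-tuning on
# the train split is needed to approach the reported ROUGE-L of 0.527.
output_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```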