yuvalkirstain commited on
Commit
ea11720
1 Parent(s): f1495bc

Upload dataset_infos.json

Browse files
Files changed (1) hide show
  1. dataset_infos.json +1 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"yuvalkirstain--quality_t5": {"description": "\nSCROLLS: Standardized CompaRison Over Long Language Sequences.\nA suite of natural language datasets that require reasoning over long texts.\nhttps://scrolls-benchmark.com/\n@article{pang2021quality,\n title={{QuALITY}: Question Answering with Long Input Texts, Yes!},\n author={Pang, Richard Yuanzhe and Parrish, Alicia and Joshi, Nitish and Nangia, Nikita and Phang, Jason and Chen, Angelica and Padmakumar, Vishakh and Ma, Johnny and Thompson, Jana and He, He and Bowman, Samuel R.},\n journal={arXiv preprint arXiv:2112.08608},\n year={2021}\n}\n", "citation": "\nQuALITY (Pang et al., 2021) is a multiple-choice question answering dataset over articles and stories sourced from Project Gutenberg, \nthe Open American National Corpus, and more.\nExperienced writers wrote questions and distractors, and were incentivized to write answerable, unambiguous questions such that in order to correctly answer them, \nhuman annotators must read large portions of the given document. \nReference answers were then calculated using the majority vote between the annotators and writer's answers.\nTo measure the difficulty of their questions, Pang et al. conducted a speed validation process, \nwhere another set of annotators were asked to answer questions given only a short period of time to skim through the document.\nAs a result, 50% of the questions in QuALITY are labeled as hard, i.e. the majority of the annotators in the speed validation setting chose the wrong answer.\n\n@article{ TODO citation here\n}\nNote that each SCROLLS dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n", "homepage": "https://github.com/nyu-mll/quality", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "pid": {"dtype": "string", "id": null, "_type": "Value"}, "input": {"dtype": "string", "id": null, "_type": "Value"}, "output": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"train": {"name": "train", "num_bytes": 5274925, "num_examples": 2523, "dataset_name": "quality_t5"}, "validation": {"name": "validation", "num_bytes": 4384578, "num_examples": 2086, "dataset_name": "quality_t5"}, "test": {"name": "test", "num_bytes": 4384578, "num_examples": 2086, "dataset_name": "quality_t5"}}, "download_checksums": null, "download_size": 3330019, "post_processing_size": null, "dataset_size": 14044081, "size_in_bytes": 17374100}}