Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
albertvillanova (HF staff) committed
Commit
c9e8b5f
1 Parent(s): 1251ea3

Delete legacy dataset_infos.json

Files changed (1)
  1. dataset_infos.json +0 -65
dataset_infos.json DELETED
@@ -1,65 +0,0 @@
-{
-  "default": {
-    "description": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.\n. DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a\nquestion, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or\n sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was\n necessary for prior datasets.\n",
-    "citation": "@inproceedings{Dua2019DROP,\n author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},\n title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},\n booktitle={Proc. of NAACL},\n year={2019}\n}\n",
-    "homepage": "https://allennlp.org/drop",
-    "license": "",
-    "features": {
-      "section_id": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "query_id": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "passage": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "question": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "answers_spans": {
-        "feature": {
-          "spans": {
-            "dtype": "string",
-            "_type": "Value"
-          },
-          "types": {
-            "dtype": "string",
-            "_type": "Value"
-          }
-        },
-        "_type": "Sequence"
-      }
-    },
-    "builder_name": "drop",
-    "dataset_name": "drop",
-    "config_name": "default",
-    "version": {
-      "version_str": "0.1.0",
-      "major": 0,
-      "minor": 1,
-      "patch": 0
-    },
-    "splits": {
-      "train": {
-        "name": "train",
-        "num_bytes": 105572506,
-        "num_examples": 77400,
-        "dataset_name": null
-      },
-      "validation": {
-        "name": "validation",
-        "num_bytes": 11737755,
-        "num_examples": 9535,
-        "dataset_name": null
-      }
-    },
-    "download_size": 11538387,
-    "dataset_size": 117310261,
-    "size_in_bytes": 128848648
-  }
-}
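For reference, the feature schema and split sizes recorded in the deleted file are the same ones the datasets library exposes at load time. A minimal sketch, assuming the standard datasets API and the "drop" dataset id on the Hub (taken from the builder_name/dataset_name fields above, not from this commit itself):

from datasets import load_dataset

# Load DROP; "drop" as the dataset id is an assumption based on the
# builder_name/dataset_name fields in the deleted metadata.
drop = load_dataset("drop")

# Split sizes should match the deleted metadata:
# train = 77,400 examples, validation = 9,535 examples.
print({split: ds.num_rows for split, ds in drop.items()})

# Features from the deleted schema: section_id, query_id, passage, question,
# and answers_spans, a sequence with parallel "spans" and "types" lists.
example = drop["train"][0]
print(example["question"])
print(example["answers_spans"]["spans"], example["answers_spans"]["types"])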