Dataset card metadata:
  Modalities: Text
  Formats: parquet
  Languages: English
  Libraries: Datasets, pandas
  License: CC BY-SA 3.0
albertvillanova (HF staff) committed
Commit 1681964
1 Parent(s): a202432

Delete legacy dataset_infos.json

Files changed (1):
  1. dataset_infos.json +0 -190
dataset_infos.json DELETED
@@ -1,190 +0,0 @@
-{
-  "light": {
-    "description": "AmbigNQ, a dataset covering 14,042 questions from NQ-open, an existing open-domain QA benchmark. We find that over half of the questions in NQ-open are ambiguous. The types of ambiguity are diverse and sometimes subtle, many of which are only apparent after examining evidence provided by a very large text corpus. AMBIGNQ, a dataset with\n14,042 annotations on NQ-OPEN questions containing diverse types of ambiguity.\nWe provide two distributions of our new dataset AmbigNQ: a full version with all annotation metadata and a light version with only inputs and outputs.\n",
-    "citation": "@inproceedings{ min2020ambigqa,\n title={ {A}mbig{QA}: Answering Ambiguous Open-domain Questions },\n author={ Min, Sewon and Michael, Julian and Hajishirzi, Hannaneh and Zettlemoyer, Luke },\n booktitle={ EMNLP },\n year={2020}\n}\n",
-    "homepage": "https://nlp.cs.washington.edu/ambigqa/",
-    "license": "CC BY-SA 3.0",
-    "features": {
-      "id": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "question": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "annotations": {
-        "feature": {
-          "type": {
-            "dtype": "string",
-            "_type": "Value"
-          },
-          "answer": {
-            "feature": {
-              "dtype": "string",
-              "_type": "Value"
-            },
-            "_type": "Sequence"
-          },
-          "qaPairs": {
-            "feature": {
-              "question": {
-                "dtype": "string",
-                "_type": "Value"
-              },
-              "answer": {
-                "feature": {
-                  "dtype": "string",
-                  "_type": "Value"
-                },
-                "_type": "Sequence"
-              }
-            },
-            "_type": "Sequence"
-          }
-        },
-        "_type": "Sequence"
-      }
-    },
-    "builder_name": "parquet",
-    "dataset_name": "ambig_qa",
-    "config_name": "light",
-    "version": {
-      "version_str": "1.0.0",
-      "major": 1,
-      "minor": 0,
-      "patch": 0
-    },
-    "splits": {
-      "train": {
-        "name": "train",
-        "num_bytes": 2739628,
-        "num_examples": 10036,
-        "dataset_name": null
-      },
-      "validation": {
-        "name": "validation",
-        "num_bytes": 805756,
-        "num_examples": 2002,
-        "dataset_name": null
-      }
-    },
-    "download_size": 1777867,
-    "dataset_size": 3545384,
-    "size_in_bytes": 5323251
-  },
-  "full": {
-    "description": "AmbigNQ, a dataset covering 14,042 questions from NQ-open, an existing open-domain QA benchmark. We find that over half of the questions in NQ-open are ambiguous. The types of ambiguity are diverse and sometimes subtle, many of which are only apparent after examining evidence provided by a very large text corpus. AMBIGNQ, a dataset with\n14,042 annotations on NQ-OPEN questions containing diverse types of ambiguity.\nWe provide two distributions of our new dataset AmbigNQ: a full version with all annotation metadata and a light version with only inputs and outputs.\n",
-    "citation": "@inproceedings{ min2020ambigqa,\n title={ {A}mbig{QA}: Answering Ambiguous Open-domain Questions },\n author={ Min, Sewon and Michael, Julian and Hajishirzi, Hannaneh and Zettlemoyer, Luke },\n booktitle={ EMNLP },\n year={2020}\n}\n",
-    "homepage": "https://nlp.cs.washington.edu/ambigqa/",
-    "license": "CC BY-SA 3.0",
-    "features": {
-      "id": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "question": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "annotations": {
-        "feature": {
-          "type": {
-            "dtype": "string",
-            "_type": "Value"
-          },
-          "answer": {
-            "feature": {
-              "dtype": "string",
-              "_type": "Value"
-            },
-            "_type": "Sequence"
-          },
-          "qaPairs": {
-            "feature": {
-              "question": {
-                "dtype": "string",
-                "_type": "Value"
-              },
-              "answer": {
-                "feature": {
-                  "dtype": "string",
-                  "_type": "Value"
-                },
-                "_type": "Sequence"
-              }
-            },
-            "_type": "Sequence"
-          }
-        },
-        "_type": "Sequence"
-      },
-      "viewed_doc_titles": {
-        "feature": {
-          "dtype": "string",
-          "_type": "Value"
-        },
-        "_type": "Sequence"
-      },
-      "used_queries": {
-        "feature": {
-          "query": {
-            "dtype": "string",
-            "_type": "Value"
-          },
-          "results": {
-            "feature": {
-              "title": {
-                "dtype": "string",
-                "_type": "Value"
-              },
-              "snippet": {
-                "dtype": "string",
-                "_type": "Value"
-              }
-            },
-            "_type": "Sequence"
-          }
-        },
-        "_type": "Sequence"
-      },
-      "nq_answer": {
-        "feature": {
-          "dtype": "string",
-          "_type": "Value"
-        },
-        "_type": "Sequence"
-      },
-      "nq_doc_title": {
-        "dtype": "string",
-        "_type": "Value"
-      }
-    },
-    "builder_name": "parquet",
-    "dataset_name": "ambig_qa",
-    "config_name": "full",
-    "version": {
-      "version_str": "1.0.0",
-      "major": 1,
-      "minor": 0,
-      "patch": 0
-    },
-    "splits": {
-      "train": {
-        "name": "train",
-        "num_bytes": 43538533,
-        "num_examples": 10036,
-        "dataset_name": null
-      },
-      "validation": {
-        "name": "validation",
-        "num_bytes": 15383268,
-        "num_examples": 2002,
-        "dataset_name": null
-      }
-    },
-    "download_size": 30674462,
-    "dataset_size": 58921801,
-    "size_in_bytes": 89596263
-  }
-}
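
With the legacy file removed, the same metadata (description, license, features, splits) can still be inspected programmatically through the datasets library. A minimal sketch, assuming the dataset remains available under the repo id "ambig_qa" with the "light" and "full" configs listed in the deleted file:

# Minimal sketch: read the metadata that the removed dataset_infos.json used to hold,
# via the `datasets` library instead of the legacy JSON file.
# Assumption: the repo id "ambig_qa" and the config names "light"/"full" from the
# deleted file are still valid on the Hub.
from datasets import load_dataset_builder

builder = load_dataset_builder("ambig_qa", "light")
info = builder.info

print(info.description)  # AmbigNQ description shown in the deleted file
print(info.license)      # "CC BY-SA 3.0"
print(info.features)     # id, question, annotations (type / answer / qaPairs)
print(info.splits)       # train and validation split infos

Loading the data itself is unchanged: load_dataset("ambig_qa", "full") returns the train and validation splits described in the removed file.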