Modalities: Text
Formats: parquet
Languages: English
ArXiv:
Libraries: Datasets, pandas
License:
Commit 1251ea3 by albertvillanova (HF staff)
Parent(s): ffd96bd

Delete loading script

Files changed (1):
  1. drop.py  +0 -202
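
With the loading script removed, the `datasets` library presumably serves DROP from the parquet files listed in the metadata above instead of executing `drop.py`. A minimal sketch of loading the dataset after this change, assuming the repository is still reachable under an id like "drop" (the id and the column access below are illustrative, not taken from this commit):

    from datasets import load_dataset

    # Download the parquet-backed dataset; no loading script is executed.
    drop = load_dataset("drop")

    # Splits and columns mirror what the deleted script defined:
    # section_id, query_id, passage, question, answers_spans.
    print(drop)
    print(drop["train"][0]["question"])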
drop.py DELETED
@@ -1,202 +0,0 @@
-"""TODO(drop): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{Dua2019DROP,
-  author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
-  title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},
-  booktitle={Proc. of NAACL},
-  year={2019}
-}
-"""
-
-_DESCRIPTION = """\
-DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.
-DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a
-question, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or
-sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was
-necessary for prior datasets.
-"""
-_URL = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
-
-
-class AnswerParsingError(Exception):
-    pass
-
-
-class DropDateObject:
-    """
-    Custom parser for date answers in DROP.
-    A date answer is a dict <date> with at least one of day|month|year.
-
-    Example: date == {
-        'day': '9',
-        'month': 'March',
-        'year': '2021'
-    }
-
-    This dict is parsed and flattened to '{day} {month} {year}', not including
-    blank values.
-
-    Example: str(DropDateObject(date)) == '9 March 2021'
-    """
-
-    def __init__(self, dict_date):
-        self.year = dict_date.get("year", "")
-        self.month = dict_date.get("month", "")
-        self.day = dict_date.get("day", "")
-
-    def __iter__(self):
-        yield from [self.day, self.month, self.year]
-
-    def __bool__(self):
-        return any(self)
-
-    def __repr__(self):
-        return " ".join(self).strip()
-
-
-class Drop(datasets.GeneratorBasedBuilder):
-    """TODO(drop): Short description of my dataset."""
-
-    # TODO(drop): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(drop): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "section_id": datasets.Value("string"),
-                    "query_id": datasets.Value("string"),
-                    "passage": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers_spans": datasets.features.Sequence(
-                        {"spans": datasets.Value("string"), "types": datasets.Value("string")}
-                    )
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://allennlp.org/drop",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(drop): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "drop_dataset")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_train.json"), "split": "train"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_dev.json"), "split": "validation"},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        # TODO(drop): Yields (key, example) tuples from the dataset
-        with open(filepath, mode="r", encoding="utf-8") as f:
-            data = json.load(f)
-            id_ = 0
-            for i, (section_id, section) in enumerate(data.items()):
-                for j, qa in enumerate(section["qa_pairs"]):
-
-                    example = {
-                        "section_id": section_id,
-                        "query_id": qa["query_id"],
-                        "passage": section["passage"],
-                        "question": qa["question"],
-                    }
-
-                    if split == "train":
-                        answers = [qa["answer"]]
-                    else:
-                        answers = qa["validated_answers"]
-
-                    try:
-                        example["answers_spans"] = self.build_answers(answers)
-                        yield id_, example
-                        id_ += 1
-                    except AnswerParsingError:
-                        # This is expected for 9 examples of train
-                        # and 1 of validation.
-                        continue
-
-    @staticmethod
-    def _raise(message):
-        """
-        Raise a custom AnswerParsingError, to be sure to only catch our own
-        errors. Messages are irrelevant for this script, but are written to
-        ease understanding the code.
-        """
-        raise AnswerParsingError(message)
-
-    def build_answers(self, answers):
-
-        returned_answers = {
-            "spans": list(),
-            "types": list(),
-        }
-        for answer in answers:
-            date = DropDateObject(answer["date"])
-
-            if answer["number"] != "":
-                # sanity checks
-                if date:
-                    self._raise("This answer is both number and date!")
-                if len(answer["spans"]):
-                    self._raise("This answer is both number and text!")
-
-                returned_answers["spans"].append(answer["number"])
-                returned_answers["types"].append("number")
-
-            elif date:
-                # sanity check
-                if len(answer["spans"]):
-                    self._raise("This answer is both date and text!")
-
-                returned_answers["spans"].append(str(date))
-                returned_answers["types"].append("date")
-
-            # won't trigger if len(answer['spans']) == 0
-            for span in answer["spans"]:
-                # sanity checks
-                if answer["number"] != "":
-                    self._raise("This answer is both text and number!")
-                if date:
-                    self._raise("This answer is both text and date!")
-
-                returned_answers["spans"].append(span)
-                returned_answers["types"].append("span")
-
-        # sanity check
-        _len = len(returned_answers["spans"])
-        if not _len:
-            self._raise("Empty answer.")
-        if any(len(l) != _len for _, l in returned_answers.items()):
-            self._raise("Something went wrong while parsing answer values/types")
-
-        return returned_answers
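
For reference, this is roughly how the deleted build_answers / DropDateObject logic flattened a raw DROP answer into the parallel "spans" / "types" lists exposed as answers_spans. The snippet below is an illustrative restatement of that logic on a made-up answer dict, not code from the repository:

    # Made-up raw answer in the DROP format: number, date and spans fields,
    # of which exactly one kind is expected to be filled in.
    raw_answer = {
        "number": "",
        "date": {"day": "9", "month": "March", "year": "2021"},
        "spans": [],
    }

    date_str = " ".join(
        raw_answer["date"].get(k, "") for k in ("day", "month", "year")
    ).strip()

    if raw_answer["number"] != "":   # numeric answers are checked first
        spans, types = [raw_answer["number"]], ["number"]
    elif date_str:                   # then dates, flattened like '9 March 2021'
        spans, types = [date_str], ["date"]
    else:                            # otherwise one entry per text span
        spans = list(raw_answer["spans"])
        types = ["span"] * len(spans)

    print({"spans": spans, "types": types})
    # -> {'spans': ['9 March 2021'], 'types': ['date']}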