Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
system HF staff committed on
Commit
47c9448
1 Parent(s): d067603

Update files from the datasets library (from 1.5.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.5.0

Files changed (2) hide show
  1. dataset_infos.json +1 -1
  2. drop.py +122 -15
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"default": {"description": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.\n. DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a \nquestion, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or\n sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was \n necessary for prior datasets.\n", "citation": "@inproceedings{Dua2019DROP,\n author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},\n title={ {DROP}: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},\n booktitle={Proc. of NAACL},\n year={2019}\n}\n", "homepage": "https://allennlp.org/drop", "license": "", "features": {"passage": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers_spans": {"feature": {"spans": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "drop", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 100119741, "num_examples": 77409, "dataset_name": "drop"}, "validation": {"name": "validation", "num_bytes": 10788180, "num_examples": 9536, "dataset_name": "drop"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip": {"num_bytes": 8308692, "checksum": "39d2278a29fd729de301b111a45f434c24834f40df8f4ff116d864589e3249d6"}}, "download_size": 8308692, "dataset_size": 110907921, "size_in_bytes": 119216613}}
 
1
+ {"default": {"description": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.\n. DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a\nquestion, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or\n sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was\n necessary for prior datasets.\n", "citation": "@inproceedings{Dua2019DROP,\n author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},\n title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},\n booktitle={Proc. of NAACL},\n year={2019}\n}\n", "homepage": "https://allennlp.org/drop", "license": "", "features": {"section_id": {"dtype": "string", "id": null, "_type": "Value"}, "query_id": {"dtype": "string", "id": null, "_type": "Value"}, "passage": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers_spans": {"feature": {"spans": {"dtype": "string", "id": null, "_type": "Value"}, "types": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "drop", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 105572762, "num_examples": 77400, "dataset_name": "drop"}, "validation": {"name": "validation", "num_bytes": 11737787, "num_examples": 9535, "dataset_name": "drop"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip": {"num_bytes": 8308692, "checksum": "39d2278a29fd729de301b111a45f434c24834f40df8f4ff116d864589e3249d6"}}, "download_size": 8308692, 
"post_processing_size": null, "dataset_size": 117310549, "size_in_bytes": 125619241}}
drop.py CHANGED
@@ -29,6 +29,42 @@ question, perhaps to multiple input positions, and perform discrete operations o
29
  _URl = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
30
 
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  class Drop(datasets.GeneratorBasedBuilder):
33
  """TODO(drop): Short description of my dataset."""
34
 
@@ -43,9 +79,13 @@ class Drop(datasets.GeneratorBasedBuilder):
43
  # datasets.features.FeatureConnectors
44
  features=datasets.Features(
45
  {
 
 
46
  "passage": datasets.Value("string"),
47
  "question": datasets.Value("string"),
48
- "answers_spans": datasets.features.Sequence({"spans": datasets.Value("string")})
 
 
49
  # These are the features of your dataset like images, labels ...
50
  }
51
  ),
@@ -69,28 +109,95 @@ class Drop(datasets.GeneratorBasedBuilder):
69
  datasets.SplitGenerator(
70
  name=datasets.Split.TRAIN,
71
  # These kwargs will be passed to _generate_examples
72
- gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_train.json")},
73
  ),
74
  datasets.SplitGenerator(
75
  name=datasets.Split.VALIDATION,
76
  # These kwargs will be passed to _generate_examples
77
- gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_dev.json")},
78
  ),
79
  ]
80
 
81
- def _generate_examples(self, filepath):
82
  """Yields examples."""
83
  # TODO(drop): Yields (key, example) tuples from the dataset
84
- with open(filepath, encoding="utf-8") as f:
85
  data = json.load(f)
86
- for i, key in enumerate(data):
87
- example = data[key]
88
- qa_pairs = example["qa_pairs"]
89
- for j, qa in enumerate(qa_pairs):
90
- question = qa["question"]
91
- answers = qa["answer"]["spans"]
92
- yield str(i) + "_" + str(j), {
93
- "passage": example["passage"],
94
- "question": question,
95
- "answers_spans": {"spans": answers},
96
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  _URl = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
30
 
31
 
32
+ class AnswerParsingError(Exception):
33
+ pass
34
+
35
+
36
+ class DropDateObject:
37
+ """
38
+ Custom parser for date answers in DROP.
39
+ A date answer is a dict <date> with at least one of day|month|year.
40
+
41
+ Example: date == {
42
+ 'day': '9',
43
+ 'month': 'March',
44
+ 'year': '2021'
45
+ }
46
+
47
+ This dict is parsed and flattened to '{day} {month} {year}', not including
48
+ blank values.
49
+
50
+ Example: str(DropDateObject(date)) == '9 March 2021'
51
+ """
52
+
53
+ def __init__(self, dict_date):
54
+ self.year = dict_date.get("year", "")
55
+ self.month = dict_date.get("month", "")
56
+ self.day = dict_date.get("day", "")
57
+
58
+ def __iter__(self):
59
+ yield from [self.day, self.month, self.year]
60
+
61
+ def __bool__(self):
62
+ return any(self)
63
+
64
+ def __repr__(self):
65
+ return " ".join(self).strip()
66
+
67
+
68
  class Drop(datasets.GeneratorBasedBuilder):
69
  """TODO(drop): Short description of my dataset."""
70
 
 
79
  # datasets.features.FeatureConnectors
80
  features=datasets.Features(
81
  {
82
+ "section_id": datasets.Value("string"),
83
+ "query_id": datasets.Value("string"),
84
  "passage": datasets.Value("string"),
85
  "question": datasets.Value("string"),
86
+ "answers_spans": datasets.features.Sequence(
87
+ {"spans": datasets.Value("string"), "types": datasets.Value("string")}
88
+ )
89
  # These are the features of your dataset like images, labels ...
90
  }
91
  ),
 
109
  datasets.SplitGenerator(
110
  name=datasets.Split.TRAIN,
111
  # These kwargs will be passed to _generate_examples
112
+ gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_train.json"), "split": "train"},
113
  ),
114
  datasets.SplitGenerator(
115
  name=datasets.Split.VALIDATION,
116
  # These kwargs will be passed to _generate_examples
117
+ gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_dev.json"), "split": "validation"},
118
  ),
119
  ]
120
 
121
+ def _generate_examples(self, filepath, split):
122
  """Yields examples."""
123
  # TODO(drop): Yields (key, example) tuples from the dataset
124
+ with open(filepath, mode="r", encoding="utf-8") as f:
125
  data = json.load(f)
126
+ for i, (section_id, section) in enumerate(data.items()):
127
+ for j, qa in enumerate(section["qa_pairs"]):
128
+
129
+ example = {
130
+ "section_id": section_id,
131
+ "query_id": qa["query_id"],
132
+ "passage": section["passage"],
133
+ "question": qa["question"],
 
 
134
  }
135
+
136
+ if split == "train":
137
+ answers = [qa["answer"]]
138
+ else:
139
+ answers = qa["validated_answers"]
140
+
141
+ try:
142
+ example["answers_spans"] = self.build_answers(answers)
143
+ yield example["query_id"], example
144
+ except AnswerParsingError:
145
+ # This is expected for 9 examples of train
146
+ # and 1 of validation.
147
+ continue
148
+
149
+ @staticmethod
150
+ def _raise(message):
151
+ """
152
+ Raise a custom AnswerParsingError, to be sure to only catch our own
153
+ errors. Messages are irrelevant for this script, but are written to
154
+ ease understanding the code.
155
+ """
156
+ raise AnswerParsingError(message)
157
+
158
+ def build_answers(self, answers):
159
+
160
+ returned_answers = {
161
+ "spans": list(),
162
+ "types": list(),
163
+ }
164
+ for answer in answers:
165
+ date = DropDateObject(answer["date"])
166
+
167
+ if answer["number"] != "":
168
+ # sanity checks
169
+ if date:
170
+ self._raise("This answer is both number and date!")
171
+ if len(answer["spans"]):
172
+ self._raise("This answer is both number and text!")
173
+
174
+ returned_answers["spans"].append(answer["number"])
175
+ returned_answers["types"].append("number")
176
+
177
+ elif date:
178
+ # sanity check
179
+ if len(answer["spans"]):
180
+ self._raise("This answer is both date and text!")
181
+
182
+ returned_answers["spans"].append(str(date))
183
+ returned_answers["types"].append("date")
184
+
185
+ # won't trigger if len(answer['spans']) == 0
186
+ for span in answer["spans"]:
187
+ # sanity checks
188
+ if answer["number"] != "":
189
+ self._raise("This answer is both text and number!")
190
+ if date:
191
+ self._raise("This answer is both text and date!")
192
+
193
+ returned_answers["spans"].append(span)
194
+ returned_answers["types"].append("span")
195
+
196
+ # sanity check
197
+ _len = len(returned_answers["spans"])
198
+ if not _len:
199
+ self._raise("Empty answer.")
200
+ if any(len(l) != _len for _, l in returned_answers.items()):
201
+ self._raise("Something went wrong while parsing answer values/types")
202
+
203
+ return returned_answers