albertvillanova committed
Commit f59b4da
Parent: a0a1708

Delete loading script

Files changed (1):
  trivia_qa.py +0 -329
trivia_qa.py DELETED
@@ -1,329 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """TriviaQA: A Reading Comprehension Dataset."""
-
-
- import glob
- import json
- import os
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """
- @article{2017arXivtriviaqa,
-        author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
-                  Daniel and {Zettlemoyer}, Luke},
-         title = "{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
-       journal = {arXiv e-prints},
-          year = 2017,
-           eid = {arXiv:1705.03551},
-         pages = {arXiv:1705.03551},
- archivePrefix = {arXiv},
-        eprint = {1705.03551},
- }
- """
- _DOWNLOAD_URL_TMPL = "data/triviaqa-{}.zip"
- _WEB_EVIDENCE_DIR = "evidence/web"
- _WIKI_EVIDENCE_DIR = "evidence/wikipedia"
-
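# Note (illustrative; not from the original script): in _split_generators below,
# _DOWNLOAD_URL_TMPL resolves to the relative paths "data/triviaqa-rc.zip" and
# "data/triviaqa-unfiltered.zip", which the download manager fetches from this
# dataset repository before extraction.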
- _DESCRIPTION = """\
- TriviaQA is a reading comprehension dataset containing over 650K
- question-answer-evidence triples. TriviaQA includes 95K question-answer
- pairs authored by trivia enthusiasts and independently gathered evidence
- documents, six per question on average, that provide high quality distant
- supervision for answering the questions.
- """
-
- _RC_DESCRIPTION = """\
- Question-answer pairs where all documents for a given question contain the
- answer string(s).
- """
-
- _UNFILTERED_DESCRIPTION = """\
- 110k question-answer pairs for open domain QA where not all documents for a
- given question contain the answer string(s). This makes the unfiltered dataset
- more appropriate for IR-style QA.
- """
-
- _CONTEXT_ADDENDUM = "Includes context from Wikipedia and search results."
-
-
- def _web_evidence_dir(tmp_dir):
-     return sorted(glob.glob(os.path.join(tmp_dir, _WEB_EVIDENCE_DIR)))
-
-
- def _wiki_evidence_dir(tmp_dir):
-     return sorted(glob.glob(os.path.join(tmp_dir, _WIKI_EVIDENCE_DIR)))
-
-
- def _qa_files(file_paths, sources, split, unfiltered):
-     qa_dir = (
-         os.path.join(file_paths["unfiltered"], "triviaqa-unfiltered")
-         if unfiltered
-         else os.path.join(file_paths["rc"], "qa")
-     )
-
-     suffix_mapping = {
-         datasets.Split.TRAIN: "train.json",
-         datasets.Split.VALIDATION: "dev.json",
-         datasets.Split.TEST: "test-without-answers.json",
-     }
-     suffix = suffix_mapping[split]
-
-     filenames = [f"unfiltered-web-{suffix}"] if unfiltered else [f"{source}-{suffix}" for source in sources]
-
-     filenames = [os.path.join(qa_dir, filename) for filename in filenames]
-
-     return sorted(filenames)
-
-
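# Note (illustrative; not from the original script): for the "rc" config with
# sources=["web", "wikipedia"] and split=datasets.Split.TRAIN, _qa_files returns
#     [<rc_dir>/qa/web-train.json, <rc_dir>/qa/wikipedia-train.json]
# (sorted), while unfiltered configs always read a single unfiltered-web-* file.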
- class TriviaQaConfig(datasets.BuilderConfig):
-     """BuilderConfig for TriviaQA."""
-
-     def __init__(self, source="all", unfiltered=False, exclude_context=False, **kwargs):
-         """BuilderConfig for TriviaQA.
-
-         Args:
-           source: str, one of "all", "web" or "wikipedia", the evidence source(s) to use.
-           unfiltered: bool, whether to use the unfiltered version of the dataset,
-             intended for open-domain QA.
-           exclude_context: bool, whether to exclude Wikipedia and search context for
-             reduced size.
-           **kwargs: keyword arguments forwarded to super.
-         """
-         name = "unfiltered" if unfiltered else "rc"
-
-         assert source in ["all", "web", "wikipedia"]
-
-         # there is no unfiltered version for the wikipedia subset
-         # --> the unfiltered subset for source="all" is the same as for source="web"
-         # --> only accept source="all" if unfiltered is True
-         assert not unfiltered or source == "all"
-
-         if source != "all":
-             name += f".{source}"
-
-         if exclude_context:
-             name += ".nocontext"
-         description = _UNFILTERED_DESCRIPTION if unfiltered else _RC_DESCRIPTION
-         if not exclude_context:
-             description += _CONTEXT_ADDENDUM
-         super(TriviaQaConfig, self).__init__(
-             name=name, description=description, version=datasets.Version("1.2.0"), **kwargs
-         )
-
-         self.sources = ["web", "wikipedia"] if source == "all" else [source]
-         self.unfiltered = unfiltered
-         self.exclude_context = exclude_context
-
-
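# Note (illustrative; not from the original script): the naming scheme above gives
#     source="all", unfiltered=False, exclude_context=False  -> "rc"
#     source="web", unfiltered=False, exclude_context=True   -> "rc.web.nocontext"
#     source="all", unfiltered=True,  exclude_context=True   -> "unfiltered.nocontext"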
- class TriviaQa(datasets.GeneratorBasedBuilder):
-     """TriviaQA is a reading comprehension dataset.
-
-     It contains over 650K question-answer-evidence triples.
-     """
-
-     BUILDER_CONFIGS = [
-         TriviaQaConfig(source="all", unfiltered=False, exclude_context=False),  # rc
-         TriviaQaConfig(source="all", unfiltered=False, exclude_context=True),  # rc.nocontext
-         TriviaQaConfig(source="all", unfiltered=True, exclude_context=False),  # unfiltered
-         TriviaQaConfig(source="all", unfiltered=True, exclude_context=True),  # unfiltered.nocontext
-         TriviaQaConfig(source="web", unfiltered=False, exclude_context=False),  # rc.web
-         TriviaQaConfig(source="web", unfiltered=False, exclude_context=True),  # rc.web.nocontext
-         TriviaQaConfig(source="wikipedia", unfiltered=False, exclude_context=False),  # rc.wikipedia
-         TriviaQaConfig(source="wikipedia", unfiltered=False, exclude_context=True),  # rc.wikipedia.nocontext
-     ]
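# Note (illustrative; not from the original script): while the script existed,
# these configs were loaded by name, e.g.
#
#     import datasets
#     ds = datasets.load_dataset("trivia_qa", "rc.nocontext")
#
# This commit removes the script, presumably in favor of data files served
# directly from the Hub.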
-     DEFAULT_WRITER_BATCH_SIZE = 1000  # examples are quite big, so set this value to save some RAM
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "question": datasets.Value("string"),
-                     "question_id": datasets.Value("string"),
-                     "question_source": datasets.Value("string"),
-                     "entity_pages": datasets.features.Sequence(
-                         {
-                             "doc_source": datasets.Value("string"),
-                             "filename": datasets.Value("string"),
-                             "title": datasets.Value("string"),
-                             "wiki_context": datasets.Value("string"),
-                         }
-                     ),
-                     "search_results": datasets.features.Sequence(
-                         {
-                             "description": datasets.Value("string"),
-                             "filename": datasets.Value("string"),
-                             "rank": datasets.Value("int32"),
-                             "title": datasets.Value("string"),
-                             "url": datasets.Value("string"),
-                             "search_context": datasets.Value("string"),
-                         }
-                     ),
-                     "answer": dict(
-                         {
-                             "aliases": datasets.features.Sequence(datasets.Value("string")),
-                             "normalized_aliases": datasets.features.Sequence(datasets.Value("string")),
-                             "matched_wiki_entity_name": datasets.Value("string"),
-                             "normalized_matched_wiki_entity_name": datasets.Value("string"),
-                             "normalized_value": datasets.Value("string"),
-                             "type": datasets.Value("string"),
-                             "value": datasets.Value("string"),
-                         }
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="http://nlp.cs.washington.edu/triviaqa/",
-             citation=_CITATION,
-         )
-
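# Note (illustrative; not from the original script): "entity_pages" and
# "search_results" are Sequence features over dicts, so a yielded example stores
# them column-wise, e.g.
#     example["entity_pages"] == {"doc_source": [...], "filename": [...],
#                                 "title": [...], "wiki_context": [...]}
# matching the dict-of-lists built by _transpose_and_strip_dicts below.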
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         cfg = self.config
-         download_urls = dict()
-         if not (cfg.unfiltered and cfg.exclude_context):
-             download_urls["rc"] = _DOWNLOAD_URL_TMPL.format("rc")
-         if cfg.unfiltered:
-             download_urls["unfiltered"] = _DOWNLOAD_URL_TMPL.format("unfiltered")
-         file_paths = dl_manager.download_and_extract(download_urls)
-
-         if cfg.exclude_context:
-             web_evidence_dir = None
-             wiki_evidence_dir = None
-         else:
-             web_evidence_dir = os.path.join(file_paths["rc"], _WEB_EVIDENCE_DIR)
-             wiki_evidence_dir = os.path.join(file_paths["rc"], _WIKI_EVIDENCE_DIR)
-
-         return [
-             datasets.SplitGenerator(
-                 name=name,
-                 gen_kwargs={
-                     "files": _qa_files(file_paths, cfg.sources, name, cfg.unfiltered),
-                     "web_dir": web_evidence_dir,
-                     "wiki_dir": wiki_evidence_dir,
-                 },
-             )
-             for name in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
-         ]
-
-     def _generate_examples(self, files, web_dir, wiki_dir):
-         """This function returns the examples."""
-
-         def parse_example(article):
-             """Return a single example from an article JSON record."""
-
-             def _strip(collection):
-                 return [item.strip() for item in collection]
-
-             if "Answer" in article:
-                 answer = article["Answer"]
-                 answer_dict = {
-                     "aliases": _strip(answer["Aliases"]),
-                     "normalized_aliases": _strip(answer["NormalizedAliases"]),
-                     "matched_wiki_entity_name": answer.get("MatchedWikiEntryName", "").strip(),
-                     "normalized_matched_wiki_entity_name": answer.get("NormalizedMatchedWikiEntryName", "").strip(),
-                     "normalized_value": answer["NormalizedValue"].strip(),
-                     "type": answer["Type"].strip(),
-                     "value": answer["Value"].strip(),
-                 }
-             else:
-                 answer_dict = {
-                     "aliases": [],
-                     "normalized_aliases": [],
-                     "matched_wiki_entity_name": "<unk>",
-                     "normalized_matched_wiki_entity_name": "<unk>",
-                     "normalized_value": "<unk>",
-                     "type": "",
-                     "value": "<unk>",
-                 }
-
-             if self.config.exclude_context:
-                 article["SearchResults"] = []
-                 article["EntityPages"] = []
-
-             def _add_context(collection, context_field, file_dir):
-                 """Adds context from file, or skips if file does not exist."""
-                 new_items = []
-                 for item in collection:
-                     if "Filename" not in item:
-                         logger.info("Missing context 'Filename', skipping.")
-                         continue
-
-                     new_item = item.copy()
-                     fname = item["Filename"]
-                     try:
-                         with open(os.path.join(file_dir, fname), encoding="utf-8") as f:
-                             new_item[context_field] = f.read()
-                     except (IOError, FileNotFoundError):
-                         logger.info("File does not exist, skipping: %s", fname)
-                         continue
-                     new_items.append(new_item)
-                 return new_items
-
-             def _strip_if_str(v):
-                 return v.strip() if isinstance(v, str) else v
-
-             def _transpose_and_strip_dicts(dicts, field_names):
-                 return {
-                     datasets.naming.camelcase_to_snakecase(k): [_strip_if_str(d[k]) for d in dicts]
-                     for k in field_names
-                 }
-
-             search_results = _transpose_and_strip_dicts(
-                 _add_context(article.get("SearchResults", []), "SearchContext", web_dir),
-                 ["Description", "Filename", "Rank", "Title", "Url", "SearchContext"],
-             )
-
-             entity_pages = _transpose_and_strip_dicts(
-                 _add_context(article.get("EntityPages", []), "WikiContext", wiki_dir),
-                 ["DocSource", "Filename", "Title", "WikiContext"],
-             )
-
-             question = article["Question"].strip()
-             question_id = article["QuestionId"]
-             question_source = article["QuestionSource"].strip()
-
-             return {
-                 "entity_pages": entity_pages,
-                 "search_results": search_results,
-                 "question": question,
-                 "question_id": question_id,
-                 "question_source": question_source,
-                 "answer": answer_dict,
-             }
-
-         for filepath in files:
-             logger.info("generating examples from = %s", filepath)
-             fname = os.path.basename(filepath)
-
-             with open(filepath, encoding="utf-8") as f:
-                 current_record = ""
-                 for line in f:
-                     if line == "        {\n":
-                         current_record = line
-                     elif line.startswith("        }"):  # Handles final record as well.
-                         article = json.loads(current_record + "}")
-                         current_record = ""
-                         example = parse_example(article)
-                         yield "%s_%s" % (fname, example["question_id"]), example
-                     else:
-                         current_record += line
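The final loop is the interesting part of the deleted script: the QA files are large pretty-printed JSON documents, and rather than json.load each file whole, the generator scans line by line, buffering from the line that opens a record at the list-nesting indent until the line that closes it, then parsing just that record (re-adding the closing brace, since the closing line may carry a trailing comma). Below is a minimal, self-contained sketch of the same pattern; the file name, toy contents, and 8-space record indent are illustrative assumptions, not taken from the original data.

import json

# Toy stand-in for one of the pretty-printed TriviaQA QA files
# (file name and indent width are illustrative assumptions).
TOY = """{
    "Data": [
        {
            "QuestionId": "q1"
        },
        {
            "QuestionId": "q2"
        }
    ]
}
"""

with open("toy.json", "w", encoding="utf-8") as f:
    f.write(TOY)


def iter_records(path):
    """Yield one parsed record at a time without loading the whole file."""
    current = ""
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line == "        {\n":  # a record opens at the list indent
                current = line
            elif line.startswith("        }"):  # record closes with "}" or "},"
                yield json.loads(current + "}")  # re-add the brace, minus any comma
                current = ""
            else:
                current += line


print([r["QuestionId"] for r in iter_records("toy.json")])  # prints ['q1', 'q2']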