Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:

Support streaming

PR #1
by albertvillanova (HF staff) — opened
Files changed (1)
  1. biology_how_why_corpus.py +30 -40
biology_how_why_corpus.py CHANGED
@@ -21,18 +21,15 @@ used for the question-answering system described in the paper “Discourse Compl
21
  Answer Reranking” (ACL 2014).
22
  """
23
 
24
- import os
25
  import xml.dom.minidom as xml
26
- from itertools import chain, count
27
- from typing import Dict, List, Tuple
28
 
29
  import datasets
30
 
31
- from .bigbiohub import qa_features
32
- from .bigbiohub import BigBioConfig
33
- from .bigbiohub import Tasks
34
 
35
- _LANGUAGES = ['English']
36
  _PUBMED = False
37
  _LOCAL = False
38
  _CITATION = """\
@@ -65,7 +62,7 @@ Answer Reranking” (ACL 2014).
65
 
66
  _HOMEPAGE = "https://allenai.org/data/biology-how-why-corpus"
67
 
68
- _LICENSE = 'License information unavailable'
69
 
70
  _URLS = {
71
  _DATASETNAME: "https://ai2-public-datasets.s3.amazonaws.com/biology-how-why-corpus/BiologyHowWhyCorpus.tar",
@@ -133,53 +130,46 @@ class BiologyHowWhyCorpusDataset(datasets.GeneratorBasedBuilder):
133
 
134
  def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
135
  urls = _URLS[_DATASETNAME]
136
- data_dir = dl_manager.download_and_extract(urls)
137
 
138
  return [
139
  datasets.SplitGenerator(
140
  name=datasets.Split.TRAIN,
141
  gen_kwargs={
142
- "how_path": os.path.join(
143
- data_dir, "BiologyHowWhyCorpus", "GoldStandardVulcanHOW.all.xml"
144
- ),
145
- "why_path": os.path.join(
146
- data_dir, "BiologyHowWhyCorpus", "GoldStandardVulcanWHY.all.xml"
147
- ),
148
  },
149
  ),
150
  ]
151
 
152
- def _generate_examples(self, how_path: str, why_path: str) -> Tuple[int, Dict]:
153
 
154
  uid = count(0)
155
 
156
  if self.config.schema == "source":
157
- for question in chain(
158
- self._parse_questions(how_path, "how"),
159
- self._parse_questions(why_path, "why"),
160
- ):
161
- yield next(uid), question
162
 
163
  elif self.config.schema == "bigbio_qa":
164
- for question in chain(
165
- self._parse_questions(how_path, "how"),
166
- self._parse_questions(why_path, "why"),
167
- ):
168
- for answer in question["answers"]:
169
- id = next(uid)
170
- yield id, {
171
- "id": id,
172
- "question_id": next(uid),
173
- "document_id": answer["docid"],
174
- "question": question["text"],
175
- "type": question["type"],
176
- "choices": [],
177
- "context": "",
178
- "answer": [answer["justification"]],
179
- }
180
 
181
- def _parse_questions(self, path: str, type: str):
182
- collection = xml.parse(path).documentElement
183
  questions = collection.getElementsByTagName("question")
184
  for question in questions:
185
  text = question.getElementsByTagName("text")[0].childNodes[0].data
@@ -202,4 +192,4 @@ class BiologyHowWhyCorpusDataset(datasets.GeneratorBasedBuilder):
202
  "sentences": sentences,
203
  }
204
  )
205
- yield {"text": text, "type": type, "answers": answers_}
 
21
  Answer Reranking” (ACL 2014).
22
  """
23
 
 
24
  import xml.dom.minidom as xml
25
+ from itertools import count
26
+ from typing import BinaryIO, Dict, List, Tuple
27
 
28
  import datasets
29
 
30
+ from .bigbiohub import BigBioConfig, Tasks, qa_features
 
 
31
 
32
+ _LANGUAGES = ["English"]
33
  _PUBMED = False
34
  _LOCAL = False
35
  _CITATION = """\
 
62
 
63
  _HOMEPAGE = "https://allenai.org/data/biology-how-why-corpus"
64
 
65
+ _LICENSE = "License information unavailable"
66
 
67
  _URLS = {
68
  _DATASETNAME: "https://ai2-public-datasets.s3.amazonaws.com/biology-how-why-corpus/BiologyHowWhyCorpus.tar",
 
130
 
131
  def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
132
  urls = _URLS[_DATASETNAME]
133
+ archive_path = dl_manager.download(urls)
134
 
135
  return [
136
  datasets.SplitGenerator(
137
  name=datasets.Split.TRAIN,
138
  gen_kwargs={
139
+ "archive": dl_manager.iter_archive(archive_path),
 
 
 
 
 
140
  },
141
  ),
142
  ]
143
 
144
+ def _generate_examples(self, archive: Tuple[str, BinaryIO]) -> Tuple[int, Dict]:
145
 
146
  uid = count(0)
147
 
148
  if self.config.schema == "source":
149
+ for path, file in archive:
150
+ question_type = path.split(".")[-3][-3:].lower()
151
+ for question in self._parse_questions(file, question_type):
152
+ yield next(uid), question
 
153
 
154
  elif self.config.schema == "bigbio_qa":
155
+ for path, file in archive:
156
+ question_type = path.split(".")[-3][-3:].lower()
157
+ for question in self._parse_questions(file, question_type):
158
+ for answer in question["answers"]:
159
+ guid = next(uid)
160
+ yield guid, {
161
+ "id": guid,
162
+ "question_id": next(uid),
163
+ "document_id": answer["docid"],
164
+ "question": question["text"],
165
+ "type": question["type"],
166
+ "choices": [],
167
+ "context": "",
168
+ "answer": [answer["justification"]],
169
+ }
 
170
 
171
+ def _parse_questions(self, path_or_file: BinaryIO, question_type: str):
172
+ collection = xml.parse(path_or_file).documentElement
173
  questions = collection.getElementsByTagName("question")
174
  for question in questions:
175
  text = question.getElementsByTagName("text")[0].childNodes[0].data
 
192
  "sentences": sentences,
193
  }
194
  )
195
+ yield {"text": text, "type": question_type, "answers": answers_}