albertvillanova (HF staff) committed
Commit: 83429eb
Parent: 96fe571

Use hosted data files in JSON Lines format

Files changed (1):
  biomrc.py (+21, -24)
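
For context, a minimal sketch of how the updated script is typically consumed downstream; the config name "biomrc_tiny_A" is an assumption (version "tiny", setting "A") and may not match the exact names defined in the script's BUILDER_CONFIGS:

from datasets import load_dataset

# "biomrc_tiny_A" is an assumed config name (version "tiny", setting "A");
# check the script's BUILDER_CONFIGS for the exact identifiers.
dataset = load_dataset("biomrc", "biomrc_tiny_A")

# Each example exposes the fields yielded by _generate_examples.
print(dataset["test"][0]["title"])
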
biomrc.py CHANGED
@@ -21,7 +21,6 @@ import json
 
 import datasets
 
-
 logger = datasets.logging.get_logger(__name__)
 
 
@@ -47,6 +46,22 @@ _DESCRIPTION = """\
 We introduce BIOMRC, a large-scale cloze-style biomedical MRC dataset. Care was taken to reduce noise, compared to the previous BIOREAD dataset of Pappas et al. (2018). Experiments show that simple heuristics do not perform well on the new dataset and that two neural MRC models that had been tested on BIOREAD perform much better on BIOMRC, indicating that the new dataset is indeed less noisy or at least that its task is more feasible. Non-expert human performance is also higher on the new dataset compared to BIOREAD, and biomedical experts perform even better. We also introduce a new BERT-based MRC model, the best version of which substantially outperforms all other methods tested, reaching or surpassing the accuracy of biomedical experts in some experiments. We make the new dataset available in three different sizes, also releasing our code, and providing a leaderboard.
 """
 
+_URLS = {
+    "large": {
+        "train": "data/biomrc_large/dataset_train{setting}.jsonl.gz",
+        "val": "data/biomrc_large/dataset_val{setting}.jsonl.gz",
+        "test": "data/biomrc_large/dataset_test{setting}.jsonl.gz",
+    },
+    "small": {
+        "train": "data/biomrc_small/dataset_train_small{setting}.jsonl.gz",
+        "val": "data/biomrc_small/dataset_val_small{setting}.jsonl.gz",
+        "test": "data/biomrc_small/dataset_test_small{setting}.jsonl.gz",
+    },
+    "tiny": {
+        "test": "data/biomrc_tiny/dataset_tiny{setting}.jsonl.gz",
+    },
+}
+
 
 class BiomrcConfig(datasets.BuilderConfig):
     """BuilderConfig for BioMRC."""
@@ -143,23 +158,9 @@ class Biomrc(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         setting = "" if self.config.biomrc_setting == "A" else "_B"
-        if self.config.biomrc_version == "large":
-            urls_to_download = {
-                "train": f"https://archive.org/download/biomrc_dataset/biomrc_large/dataset_train{setting}.json.gz",
-                "val": f"https://archive.org/download/biomrc_dataset/biomrc_large/dataset_val{setting}.json.gz",
-                "test": f"https://archive.org/download/biomrc_dataset/biomrc_large/dataset_test{setting}.json.gz",
-            }
-        elif self.config.biomrc_version == "small":
-            urls_to_download = {
-                "train": f"https://archive.org/download/biomrc_dataset/biomrc_small/dataset_train_small{setting}.json.gz",
-                "val": f"https://archive.org/download/biomrc_dataset/biomrc_small/dataset_val_small{setting}.json.gz",
-                "test": f"https://archive.org/download/biomrc_dataset/biomrc_small/dataset_test_small{setting}.json.gz",
-            }
-        else:
-            urls_to_download = {
-                "test": f"https://archive.org/download/biomrc_dataset/biomrc_tiny/dataset_tiny{setting}.json.gz"
-            }
-
+        urls_to_download = {
+            split: url.format(setting=setting) for split, url in _URLS[self.config.biomrc_version].items()
+        }
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
         if self.config.biomrc_version == "tiny":
@@ -178,10 +179,6 @@ class Biomrc(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath):
         """This function returns the examples in the raw (text) form."""
         logger.info("generating examples from = %s", filepath)
-        # Id for the biomrc dataset
         with open(filepath, encoding="utf-8") as fp:
-            biomrc = json.load(fp)
-            for _id, (ab, ti, el, an) in enumerate(
-                zip(biomrc["abstracts"], biomrc["titles"], biomrc["entities_list"], biomrc["answers"])
-            ):
-                yield _id, {"abstract": ab, "title": ti, "entities_list": el, "answer": an}
+            for idx, example in enumerate(fp):
+                yield idx, json.loads(example)
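
To illustrate the switch from one JSON object per file to JSON Lines, here is a self-contained sketch. The records are hypothetical, the field names mirror what _generate_examples yields, and gzip compression is omitted because dl_manager.download_and_extract already decompresses the hosted files before _generate_examples runs.

import json
import tempfile

# Two hypothetical records in the shape _generate_examples yields.
records = [
    {"abstract": "Abstract one ...", "title": "Title one ...", "entities_list": ["@entity1"], "answer": "@entity1"},
    {"abstract": "Abstract two ...", "title": "Title two ...", "entities_list": ["@entity2"], "answer": "@entity2"},
]

# Write them as JSON Lines: one JSON object per line.
with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False, encoding="utf-8") as tmp:
    for record in records:
        tmp.write(json.dumps(record) + "\n")
    path = tmp.name

# Read them back the way the updated _generate_examples does:
# enumerate the open file handle and decode each line independently.
with open(path, encoding="utf-8") as fp:
    for idx, line in enumerate(fp):
        example = json.loads(line)
        print(idx, example["answer"])

Because each line is decoded independently, the loader no longer has to hold an entire split in memory the way json.load did, which is the practical benefit of the hosted jsonl.gz layout.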