Datasets:
ohsumed

Task Categories: text-classification
Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: crowdsourced
Annotations Creators: expert-generated
Source Datasets: original
system committed on
Commit
08e2114
1 Parent(s): 22a72ad

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2)
  1. README.md +1 -0
  2. ohsumed.py +51 -48
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  annotations_creators:
3
  - human-annotated
4
  language_creators:
1
  ---
2
+ pretty_name: Ohsumed
3
  annotations_creators:
4
  - human-annotated
5
  language_creators:
ohsumed.py CHANGED
@@ -15,8 +15,6 @@
15
  """OHSUMED: An Interactive Retrieval Evaluation and New Large Test Collection for Research."""
16
 
17
 
18
- import os
19
-
20
  import datasets
21
 
22
 
@@ -128,24 +126,27 @@ class Ohsumed(datasets.GeneratorBasedBuilder):
128
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
129
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
130
  my_urls = _URLs[self.config.name]
131
- data_dir = dl_manager.download_and_extract(my_urls)
132
  return [
133
  datasets.SplitGenerator(
134
  name=datasets.Split.TRAIN,
135
  # These kwargs will be passed to _generate_examples
136
  gen_kwargs={
137
- "filepath": os.path.join(data_dir, "ohsu-trec/trec9-train/ohsumed.87"),
138
- "split": "train",
139
  },
140
  ),
141
  datasets.SplitGenerator(
142
  name=datasets.Split.TEST,
143
  # These kwargs will be passed to _generate_examples
144
- gen_kwargs={"filepath": os.path.join(data_dir, "ohsu-trec/trec9-test/ohsumed.88-91"), "split": "test"},
 
 
 
145
  ),
146
  ]
147
 
148
- def _generate_examples(self, filepath, split):
149
  """Yields examples."""
150
  # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
151
  # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
@@ -179,44 +180,46 @@ class Ohsumed(datasets.GeneratorBasedBuilder):
179
  ".S": "source",
180
  }
181
 
182
- with open(filepath, encoding="utf-8") as f:
183
- data = ohsumed_dict()
184
-
185
- for line in f.readlines():
186
- line = line.strip()
187
-
188
- if line.startswith(".I"):
189
- tag = ".I"
190
- if data["medline_ui"] != -1:
191
- id_ = data["seq_id"] + "_" + data["medline_ui"]
192
- yield id_, {
193
- "seq_id": data["seq_id"],
194
- "medline_ui": data["medline_ui"],
195
- "mesh_terms": str(data["mesh_terms"]),
196
- "title": str(data["title"]),
197
- "publication_type": str(data["publication_type"]),
198
- "abstract": str(data["abstract"]),
199
- "author": str(data["author"]),
200
- "source": str(data["source"]),
201
- }
202
- else:
203
- data = ohsumed_dict()
204
- line = line.replace(".I", "").strip()
205
- data["seq_id"] = line
206
- elif tag and not line.startswith("."):
207
- key = column_map[tag]
208
- data[key] = line
209
- elif ".U" in line:
210
- tag = ".U"
211
- elif ".M" in line:
212
- tag = ".M"
213
- elif ".T" in line:
214
- tag = ".T"
215
- elif ".P" in line:
216
- tag = ".P"
217
- elif ".W" in line:
218
- tag = ".W"
219
- elif ".A" in line:
220
- tag = ".A"
221
- elif ".S" in line:
222
- tag = ".S"
 
 
15
  """OHSUMED: An Interactive Retrieval Evaluation and New Large Test Collection for Research."""
16
 
17
 
 
 
18
  import datasets
19
 
20
 
126
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
127
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
128
  my_urls = _URLs[self.config.name]
129
+ archive = dl_manager.download(my_urls)
130
  return [
131
  datasets.SplitGenerator(
132
  name=datasets.Split.TRAIN,
133
  # These kwargs will be passed to _generate_examples
134
  gen_kwargs={
135
+ "filepath": "ohsu-trec/trec9-train/ohsumed.87",
136
+ "files": dl_manager.iter_archive(archive),
137
  },
138
  ),
139
  datasets.SplitGenerator(
140
  name=datasets.Split.TEST,
141
  # These kwargs will be passed to _generate_examples
142
+ gen_kwargs={
143
+ "filepath": "ohsu-trec/trec9-test/ohsumed.88-91",
144
+ "files": dl_manager.iter_archive(archive),
145
+ },
146
  ),
147
  ]
148
 
149
+ def _generate_examples(self, filepath, files):
150
  """Yields examples."""
151
  # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
152
  # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
180
  ".S": "source",
181
  }
182
 
183
+ for path, f in files:
184
+ if path == filepath:
185
+ data = ohsumed_dict()
186
+
187
+ for line in f.readlines():
188
+ line = line.decode("utf-8").strip()
189
+
190
+ if line.startswith(".I"):
191
+ tag = ".I"
192
+ if data["medline_ui"] != -1:
193
+ id_ = data["seq_id"] + "_" + data["medline_ui"]
194
+ yield id_, {
195
+ "seq_id": data["seq_id"],
196
+ "medline_ui": data["medline_ui"],
197
+ "mesh_terms": str(data["mesh_terms"]),
198
+ "title": str(data["title"]),
199
+ "publication_type": str(data["publication_type"]),
200
+ "abstract": str(data["abstract"]),
201
+ "author": str(data["author"]),
202
+ "source": str(data["source"]),
203
+ }
204
+ else:
205
+ data = ohsumed_dict()
206
+ line = line.replace(".I", "").strip()
207
+ data["seq_id"] = line
208
+ elif tag and not line.startswith("."):
209
+ key = column_map[tag]
210
+ data[key] = line
211
+ elif ".U" in line:
212
+ tag = ".U"
213
+ elif ".M" in line:
214
+ tag = ".M"
215
+ elif ".T" in line:
216
+ tag = ".T"
217
+ elif ".P" in line:
218
+ tag = ".P"
219
+ elif ".W" in line:
220
+ tag = ".W"
221
+ elif ".A" in line:
222
+ tag = ".A"
223
+ elif ".S" in line:
224
+ tag = ".S"
225
+ break