gabrielaltay committed on
Commit
fcf2be2
1 Parent(s): f7df8f1

upload hub_repos/distemist/distemist.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. distemist.py +44 -17
distemist.py CHANGED
@@ -47,12 +47,12 @@ The DisTEMIST corpus is a collection of 1000 clinical cases with disease annotat
47
  All documents are released in the context of the BioASQ DisTEMIST track for CLEF 2022.
48
  """
49
 
50
- _HOMEPAGE = "https://zenodo.org/record/6671292"
51
 
52
- _LICENSE = 'Creative Commons Attribution 4.0 International'
53
 
54
  _URLS = {
55
- _DATASETNAME: "https://zenodo.org/record/6671292/files/distemist.zip?download=1",
56
  }
57
 
58
  _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
@@ -145,38 +145,65 @@ class DistemistDataset(datasets.GeneratorBasedBuilder):
145
  """Returns SplitGenerators."""
146
  urls = _URLS[_DATASETNAME]
147
  data_dir = dl_manager.download_and_extract(urls)
148
- base_bath = Path(data_dir) / "distemist" / "training"
149
- if self.config.subset_id == "distemist_entities":
150
- entity_mapping_files = [base_bath / "subtrack1_entities" / "distemist_subtrack1_training_mentions.tsv"]
151
- else:
152
- entity_mapping_files = [
153
- base_bath / "subtrack2_linking" / "distemist_subtrack2_training1_linking.tsv",
154
- base_bath / "subtrack2_linking" / "distemist_subtrack2_training2_linking.tsv",
155
- ]
156
  return [
157
  datasets.SplitGenerator(
158
  name=datasets.Split.TRAIN,
159
  gen_kwargs={
160
- "entity_mapping_files": entity_mapping_files,
161
- "text_files_dir": base_bath / "text_files",
 
 
 
 
 
 
 
 
 
162
  },
163
  ),
164
  ]
165
 
166
  def _generate_examples(
167
  self,
168
- entity_mapping_files: List[Path],
169
- text_files_dir: Path,
 
170
  ) -> Tuple[int, Dict]:
171
  """Yields examples as (key, example) tuples."""
172
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  entities_mapping = pd.concat([pd.read_csv(file, sep="\t") for file in entity_mapping_files])
174
  entity_file_names = entities_mapping["filename"].unique()
175
 
176
  for uid, filename in enumerate(entity_file_names):
177
  text_file = text_files_dir / f"{filename}.txt"
178
 
179
- doc_text = text_file.read_text()
180
  # doc_text = doc_text.replace("\n", "")
181
 
182
  entities_df: pd.DataFrame = entities_mapping[entities_mapping["filename"] == filename]
 
47
  All documents are released in the context of the BioASQ DisTEMIST track for CLEF 2022.
48
  """
49
 
50
+ _HOMEPAGE = "https://zenodo.org/record/7614764"
51
 
52
+ _LICENSE = 'CC_BY_4p0'
53
 
54
  _URLS = {
55
+ _DATASETNAME: "https://zenodo.org/record/7614764/files/distemist_zenodo.zip?download=1",
56
  }
57
 
58
  _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
 
145
  """Returns SplitGenerators."""
146
  urls = _URLS[_DATASETNAME]
147
  data_dir = dl_manager.download_and_extract(urls)
148
+ base_bath = Path(data_dir) / "distemist_zenodo"
149
+ track = self.config.subset_id.split('_')[1]
150
+
 
 
 
 
 
151
  return [
152
  datasets.SplitGenerator(
153
  name=datasets.Split.TRAIN,
154
  gen_kwargs={
155
+ "split": "train",
156
+ "track": track,
157
+ "base_bath": base_bath,
158
+ },
159
+ ),
160
+ datasets.SplitGenerator(
161
+ name=datasets.Split.TEST,
162
+ gen_kwargs={
163
+ "split": "test",
164
+ "track": track,
165
+ "base_bath": base_bath,
166
  },
167
  ),
168
  ]
169
 
170
  def _generate_examples(
171
  self,
172
+ split: str,
173
+ track: str,
174
+ base_bath: Path,
175
  ) -> Tuple[int, Dict]:
176
  """Yields examples as (key, example) tuples."""
177
+
178
+ tsv_files = {
179
+ ('entities', 'train'): [
180
+ base_bath / "training" / "subtrack1_entities" / "distemist_subtrack1_training_mentions.tsv"
181
+ ],
182
+ ('entities', 'test'): [
183
+ base_bath / "test_annotated" / "subtrack1_entities" / "distemist_subtrack1_test_mentions.tsv"
184
+ ],
185
+ ('linking', 'train'): [
186
+ base_bath / "training" / "subtrack2_linking" / "distemist_subtrack2_training1_linking.tsv",
187
+ base_bath / "training" / "subtrack2_linking" / "distemist_subtrack2_training2_linking.tsv",
188
+ ],
189
+ ('linking', 'test'): [
190
+ base_bath / "test_annotated" / "subtrack2_linking" / "distemist_subtrack2_test_linking.tsv"
191
+ ],
192
+ }
193
+ entity_mapping_files = tsv_files[(track, split)]
194
+
195
+ if split == "train":
196
+ text_files_dir = base_bath / "training" / "text_files"
197
+ elif split == "test":
198
+ text_files_dir = base_bath / "test_annotated" / "text_files"
199
+
200
  entities_mapping = pd.concat([pd.read_csv(file, sep="\t") for file in entity_mapping_files])
201
  entity_file_names = entities_mapping["filename"].unique()
202
 
203
  for uid, filename in enumerate(entity_file_names):
204
  text_file = text_files_dir / f"{filename}.txt"
205
 
206
+ doc_text = text_file.read_text(encoding='utf8')
207
  # doc_text = doc_text.replace("\n", "")
208
 
209
  entities_df: pd.DataFrame = entities_mapping[entities_mapping["filename"] == filename]