gabrielaltay committed
Commit 373d79b
1 Parent(s): 4699c72

upload hubscripts/distemist_hub.py to hub from bigbio repo

Files changed (1)
distemist.py +220 -0
distemist.py ADDED
@@ -0,0 +1,220 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['Spanish']
+ _PUBMED = False
+ _LOCAL = False
+ _CITATION = """\
+ @dataset{luis_gasco_2022_6458455,
+   author    = {Luis Gasco and Eulàlia Farré and Miranda-Escalada, Antonio and Salvador Lima and Martin Krallinger},
+   title     = {{DisTEMIST corpus: detection and normalization of disease mentions in spanish clinical cases}},
+   month     = apr,
+   year      = 2022,
+   note      = {{Funded by the Plan de Impulso de las Tecnologías del Lenguaje (Plan TL).}},
+   publisher = {Zenodo},
+   version   = {2.0.0},
+   doi       = {10.5281/zenodo.6458455},
+   url       = {https://doi.org/10.5281/zenodo.6458455}
+ }
+ """
+
+ _DATASETNAME = "distemist"
+ _DISPLAYNAME = "DisTEMIST"
+
+ _DESCRIPTION = """\
+ The DisTEMIST corpus is a collection of 1000 clinical cases with disease annotations linked with Snomed-CT concepts.
+ All documents are released in the context of the BioASQ DisTEMIST track for CLEF 2022.
+ """
+
+ _HOMEPAGE = "https://zenodo.org/record/6458455"
+
+ _LICENSE = 'Creative Commons Attribution 4.0 International'
+
+ _URLS = {
+     _DATASETNAME: "https://zenodo.org/record/6458455/files/distemist.zip?download=1",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "2.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class DistemistDataset(datasets.GeneratorBasedBuilder):
+     """
+     The DisTEMIST corpus is a collection of 1000 clinical cases with disease annotations linked with Snomed-CT
+     concepts.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
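+     # Two configs are exposed: the near-verbatim "source" schema and the
+     # harmonized "bigbio_kb" schema shared across BigBio datasets.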
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="distemist_source",
+             version=SOURCE_VERSION,
+             description="DisTEMIST source schema",
+             schema="source",
+             subset_id="distemist",
+         ),
+         BigBioConfig(
+             name="distemist_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="DisTEMIST BigBio schema",
+             schema="bigbio_kb",
+             subset_id="distemist",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "distemist_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "passages": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "concept_codes": datasets.Sequence(datasets.Value("string")),
+                             "semantic_relations": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                 }
+             )
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
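+         # NOTE: only a TRAIN split is generated; the archive read here is
+         # parsed for training mentions (subtrack 1) and training linking
+         # annotations (subtrack 2), and no other split is produced.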
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "entities_mapping_file_path": Path(data_dir)
+                     / "training/subtrack1_entities/distemist_subtrack1_training_mentions.tsv",
+                     "linking_mapping_file_path": Path(data_dir)
+                     / "training/subtrack2_linking/distemist_subtrack1_training1_linking.tsv",
+                     "text_files_dir": Path(data_dir) / "training/text_files",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self,
+         entities_mapping_file_path: Path,
+         linking_mapping_file_path: Path,
+         text_files_dir: Path,
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         entities_mapping = pd.read_csv(entities_mapping_file_path, sep="\t")
+         linking_mapping = pd.read_csv(linking_mapping_file_path, sep="\t")
+
+         entity_file_names = set(entities_mapping["filename"])
+         linking_file_names = set(linking_mapping["filename"])
+
+         # entity_file_names = entity_file_names.difference(linking_file_names)
+
+         for uid, filename in enumerate(entity_file_names):
+             text_file = text_files_dir / f"{filename}.txt"
+
+             doc_text = text_file.read_text()
+             # doc_text = doc_text.replace("\n", "")
+
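+             # Prefer the linking (subtrack 2) annotations when a document has
+             # them: those rows also carry SNOMED CT codes and semantic
+             # relations for each mention.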
+             if filename in linking_file_names:
+                 entities_df: pd.DataFrame = linking_mapping[
+                     linking_mapping["filename"] == filename
+                 ]
+             else:
+                 entities_df: pd.DataFrame = entities_mapping[
+                     entities_mapping["filename"] == filename
+                 ]
+
+             example = {
+                 "id": f"{uid}",
+                 "document_id": filename,
+                 "passages": [
+                     {
+                         "id": f"{uid}_{filename}_passage",
+                         "type": "clinical_case",
+                         "text": [doc_text],
+                         "offsets": [[0, len(doc_text)]],
+                     }
+                 ],
+             }
+             if self.config.schema == "bigbio_kb":
+                 example["events"] = []
+                 example["coreferences"] = []
+                 example["relations"] = []
+
+             entities = []
+             for row in entities_df.itertuples(name="Entity"):
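+                 # Each row exposes the TSV columns as attributes: filename,
+                 # mark, label, off0, off1, span (plus code and semantic_rel
+                 # in the linking file).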
+                 entity = {
+                     "id": f"{uid}_{row.filename}_{row.Index}_entity_id_{row.mark}",
+                     "type": row.label,
+                     "text": [row.span],
+                     "offsets": [[row.off0, row.off1]],
+                 }
+                 if self.config.schema == "source":
+                     entity["concept_codes"] = []
+                     entity["semantic_relations"] = []
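+                     # Multiple codes / relations for a single mention are
+                     # '+'-delimited in the linking TSV, hence the split below.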
+                     if filename in linking_file_names:
+                         entity["concept_codes"] = row.code.split("+")
+                         entity["semantic_relations"] = row.semantic_rel.split("+")
+
+                 elif self.config.schema == "bigbio_kb":
+                     entity["normalized"] = []
+
+                 entities.append(entity)
+
+             example["entities"] = entities
+             yield uid, example
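
Once this script (together with its `bigbiohub.py` helper) is on the Hub, the dataset can be loaded with `datasets.load_dataset`. A minimal sketch, assuming the repo id `bigbio/distemist` (the config names come from BUILDER_CONFIGS above):

    import datasets

    # Source schema: keeps dataset-specific fields such as concept_codes.
    source = datasets.load_dataset("bigbio/distemist", name="distemist_source")

    # Harmonized BigBio knowledge-base schema.
    kb = datasets.load_dataset("bigbio/distemist", name="distemist_bigbio_kb")

    print(source["train"][0]["document_id"])
    print(kb["train"][0]["entities"][:2])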