gabrielaltay committed
Commit
7f5b4e5
1 Parent(s): e9c0036

upload hubscripts/swedish_medical_ner_hub.py to hub from bigbio repo

Files changed (1):
  swedish_medical_ner.py +302 -0
swedish_medical_ner.py ADDED
@@ -0,0 +1,302 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""
swedish_medical_ner is a Named Entity Recognition dataset on medical text in Swedish.

It consists of three subsets, each derived from a different source:

* the Swedish Wikipedia (a.k.a. wiki): Wiki_annotated_60.txt
* Läkartidningen (a.k.a. lt): LT_annotated_60.txt
* 1177 Vårdguiden (a.k.a. 1177): 1177_annotated_sentences.txt

Texts from both Swedish Wikipedia and Läkartidningen were automatically annotated using a
list of medical seed terms. Sentences from 1177 Vårdguiden were manually annotated.

It is also available on Hugging Face Datasets: https://huggingface.co/datasets/swedish_medical_ner.
"""
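
# Raw lines in the data files use inline bracket markup, e.g. (a made-up example,
# not taken from the data files):
#   Patienten fick (feber) och behandlades med [alvedon] mot värk i {huvudet}.
# where ( ) marks Disorder and Finding, [ ] Pharmaceutical Drug, and { } Body Structure.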

import re
from typing import Dict, List, Tuple

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_DATASETNAME = "swedish_medical_ner"
_DISPLAYNAME = "Swedish Medical NER"

_LANGUAGES = ["Swedish"]
_PUBMED = False
_LOCAL = False
_CITATION = """\
@inproceedings{almgren-etal-2016-named,
    author = {
        Almgren, Simon and
        Pavlov, Sean and
        Mogren, Olof
    },
    title = {Named Entity Recognition in Swedish Medical Journals with Deep Bidirectional Character-Based LSTMs},
    booktitle = {Proceedings of the Fifth Workshop on Building and Evaluating Resources for Biomedical Text Mining (BioTxtM 2016)},
    publisher = {The COLING 2016 Organizing Committee},
    pages = {30-39},
    year = {2016},
    month = {12},
    url = {https://aclanthology.org/W16-5104},
    eprint = {https://aclanthology.org/W16-5104.pdf}
}
"""

_DESCRIPTION = """\
swedish_medical_ner is a Named Entity Recognition dataset on medical text in Swedish.
It consists of three subsets, each derived from a different source: the Swedish
Wikipedia (a.k.a. wiki), Läkartidningen (a.k.a. lt), and 1177 Vårdguiden (a.k.a. 1177).
While the Swedish Wikipedia and Läkartidningen subsets together contain over 790,000
sequences of 60 characters each, the 1177 Vårdguiden subset is manually annotated and
contains 927 sentences with 2,740 annotations, of which 1,574 are disorder and finding,
546 are pharmaceutical drug, and 620 are body structure.

Texts from both Swedish Wikipedia and Läkartidningen were automatically annotated
using a list of medical seed terms. Sentences from 1177 Vårdguiden were manually
annotated.
"""

_HOMEPAGE = "https://github.com/olofmogren/biomedical-ner-data-swedish/"

_LICENSE = "Creative Commons Attribution Share Alike 4.0 International"

_URLS = {
    "swedish_medical_ner_wiki": "https://raw.githubusercontent.com/olofmogren/biomedical-ner-data-swedish/master/Wiki_annotated_60.txt",
    "swedish_medical_ner_lt": "https://raw.githubusercontent.com/olofmogren/biomedical-ner-data-swedish/master/LT_annotated_60.txt",
    "swedish_medical_ner_1177": "https://raw.githubusercontent.com/olofmogren/biomedical-ner-data-swedish/master/1177_annotated_sentences.txt",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class SwedishMedicalNerDataset(datasets.GeneratorBasedBuilder):
    """
    Swedish medical named entity recognition.

    The dataset contains three subsets, namely "wiki", "lt" and "1177".
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = []
    for subset in ["wiki", "lt", "1177"]:
        BUILDER_CONFIGS.append(
            BigBioConfig(
                name=f"swedish_medical_ner_{subset}_source",
                version=SOURCE_VERSION,
                description="swedish_medical_ner source schema",
                schema="source",
                subset_id=f"swedish_medical_ner_{subset}",
            )
        )
        BUILDER_CONFIGS.append(
            BigBioConfig(
                name=f"swedish_medical_ner_{subset}_bigbio_kb",
                version=BIGBIO_VERSION,
                description="swedish_medical_ner BigBio schema",
                schema="bigbio_kb",
                subset_id=f"swedish_medical_ner_{subset}",
            )
        )

    DEFAULT_CONFIG_NAME = "swedish_medical_ner_wiki_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "sid": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "entities": [
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "text": datasets.Value("string"),
                            "type": datasets.Value("string"),
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        urls = _URLS
        filepath = dl_manager.download_and_extract(urls[self.config.subset_id])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
        ]

    @staticmethod
    def get_type(text):
        """
        Tagging format per the dataset authors:
        - Parentheses, (): Disorder and Finding
        - Brackets, []: Pharmaceutical Drug
        - Curly brackets, {}: Body Structure
        """
        if text[0] == "(":
            return "disorder_finding"
        elif text[0] == "[":
            return "pharma_drug"
        return "body_structure"
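
    # For illustration (made-up spans, not from the data files):
    #   get_type("(feber)")   -> "disorder_finding"
    #   get_type("[alvedon]") -> "pharma_drug"
    #   get_type("{huvudet}") -> "body_structure"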

    @staticmethod
    def get_source_example(uid, tagged):
        """Convert a list of (entity_type, text_span) pairs into a source-schema record."""
        ents, text = zip(*tagged)
        text = list(text)

        # build offsets
        offsets = []
        curr = 0
        for span in text:
            offsets.append((curr, curr + len(span)))
            curr = curr + len(span)

        text = "".join(text)
        doc = {"sid": "s" + str(uid), "sentence": text, "entities": []}

        # Create entities
        for i, (start, end) in enumerate(offsets):
            if ents[i] is not None:
                doc["entities"].append(
                    {
                        "start": start,
                        "end": end,
                        "text": text[start:end],
                        "type": ents[i],
                    }
                )

        return uid, doc
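
    # E.g. for the made-up row "Patienten fick (feber)." the source record is, roughly:
    #   {"sid": "s0", "sentence": "Patienten fick (feber).",
    #    "entities": [{"start": 15, "end": 22, "text": "(feber)", "type": "disorder_finding"}]}
    # Note that the source schema keeps the bracket markup in both sentence and entity text.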

    @staticmethod
    def get_bigbio_example(uid, tagged, remove_markup=True):
        """Convert a list of (entity_type, text_span) pairs into a bigbio_kb record."""
        doc = {
            "id": str(uid),
            "document_id": "s" + str(uid),
            "passages": [],
            "entities": [],
            "events": [],
            "coreferences": [],
            "relations": [],
        }

        ents, text = zip(*tagged)
        text = list(text)
        if remove_markup:
            # strip the (), [] and {} markers from entity spans
            for i in range(len(ents)):
                if ents[i] is not None:
                    text[i] = re.sub(r"[(){}\[\]]", "", text[i]).strip()

        # build offsets
        offsets = []
        curr = 0
        for span in text:
            offsets.append((curr, curr + len(span)))
            curr = curr + len(span)

        # Create passage
        passage = "".join(text)
        doc["passages"].append(
            {
                "id": str(uid) + "-passage-0",
                "type": "sentence",
                "text": [passage],
                "offsets": [[0, len(passage)]],
            }
        )

        # Create entities
        ii = 0
        for i, (start, end) in enumerate(offsets):
            if ents[i] is not None:
                doc["entities"].append(
                    {
                        "id": str(uid) + "-entity-" + str(ii),
                        "type": ents[i],
                        "text": [passage[start:end]],
                        "offsets": [[start, end]],
                        "normalized": [],
                    }
                )
                ii += 1

        return uid, doc
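
    # E.g. for the same made-up row, the bigbio_kb record has passage text
    # "Patienten fick feber." and one entity with offsets [[15, 20]] and text ["feber"]:
    # the markup is removed and the offsets recomputed against the cleaned passage.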

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        entity_rgx = re.compile(r"[(].+?[)]|[\[].+?[\]]|[{].+?[}]")

        uid = 0
        with open(filepath, "rt", encoding="utf-8") as file:
            for row in file:
                row = row.replace("\n", "")
                if row:
                    curr = 0
                    stack = []
                    # match entities and build spans for the sentence string
                    for m in entity_rgx.finditer(row):
                        span = m.group()
                        if m.start() != curr:
                            stack.append([None, row[curr : m.start()]])
                        stack.append((self.get_type(span), span))
                        curr = m.start() + len(span)
                    stack.append([None, row[curr:]])

                    if self.config.schema == "source":
                        yield self.get_source_example(uid, stack)
                    elif self.config.schema == "bigbio_kb":
                        yield self.get_bigbio_example(uid, stack)
                    uid += 1
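
Usage note: a minimal sketch of loading this script with the datasets library, assuming a datasets version that still supports community loading scripts; the config name is one of those defined in BUILDER_CONFIGS above:

    from datasets import load_dataset

    # manually annotated 1177 subset, BigBio schema
    ds = load_dataset("swedish_medical_ner.py", name="swedish_medical_ner_1177_bigbio_kb")
    print(ds["train"][0]["entities"][0])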