gabrielaltay committed
Commit e072fcf
1 Parent(s): d01f091

upload hub_repos/bronco/bronco.py to hub from bigbio repo

Files changed (1)
  1. bronco.py +289 -0
bronco.py ADDED
@@ -0,0 +1,289 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from bioc import biocxml
+
+ from .bigbiohub import BigBioConfig, Tasks, kb_features
+
+ _LOCAL = True
+ _CITATION = """\
+ @article{10.1093/jamiaopen/ooab025,
+     author = {Kittner, Madeleine and Lamping, Mario and Rieke, Damian T and Götze, Julian and Bajwa, Bariya and
+     Jelas, Ivan and Rüter, Gina and Hautow, Hanjo and Sänger, Mario and Habibi, Maryam and Zettwitz, Marit and
+     Bortoli, Till de and Ostermann, Leonie and Ševa, Jurica and Starlinger, Johannes and Kohlbacher, Oliver and
+     Malek, Nisar P and Keilholz, Ulrich and Leser, Ulf},
+     title = "{Annotation and initial evaluation of a large annotated German oncological corpus}",
+     journal = {JAMIA Open},
+     volume = {4},
+     number = {2},
+     year = {2021},
+     month = {04},
+     issn = {2574-2531},
+     doi = {10.1093/jamiaopen/ooab025},
+     url = {https://doi.org/10.1093/jamiaopen/ooab025},
+     note = {ooab025},
+     eprint = {https://academic.oup.com/jamiaopen/article-pdf/4/2/ooab025/38830128/ooab025.pdf},
+ }
+ """
+ _DESCRIPTION = """\
+ BRONCO150 is a corpus containing selected sentences of 150 German discharge summaries of cancer patients (hepatocellular
+ carcinoma or melanoma) treated at Charite Universitaetsmedizin Berlin or Universitaetsklinikum Tuebingen. All discharge
+ summaries were manually anonymized. The original documents were scrambled at the sentence level to make reconstruction
+ of individual reports impossible.
+ """
+ _HOMEPAGE = "https://www2.informatik.hu-berlin.de/~leser/bronco/index.html"
+ _LICENSE = "DUA"
+ _URLS = {}
+ _PUBMED = False
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+ _DATASETNAME = "bronco"
+ _DISPLAYNAME = "BRONCO"
+ _LANGUAGES = ["German"]
+
+
+ class Bronco(datasets.GeneratorBasedBuilder):
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+     DEFAULT_CONFIG_NAME = "bronco_bigbio_kb"
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="bronco_source",
+             version=SOURCE_VERSION,
+             description="BRONCO source schema",
+             schema="source",
+             subset_id="bronco",
+         ),
+         BigBioConfig(
+             name="bronco_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="BRONCO BigBio schema",
+             schema="bigbio_kb",
+             subset_id="bronco",
+         ),
+     ]
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "passage": {
+                         "offset": datasets.Value("int32"),
+                         "text": datasets.Value("string"),
+                         "annotation": [
+                             {
+                                 "id": datasets.Value("string"),
+                                 "infon": {
+                                     "file": datasets.Value("string"),
+                                     "type": datasets.Value("string"),
+                                 },
+                                 "location": [
+                                     {
+                                         "offset": datasets.Value("int32"),
+                                         "length": datasets.Value("int32"),
+                                     }
+                                 ],
+                                 "text": datasets.Value("string"),
+                             }
+                         ],
+                         "relation": [
+                             {
+                                 "id": datasets.Value("string"),
+                                 "infon": {
+                                     "file": datasets.Value("string"),
+                                     "type": datasets.Value("string"),
+                                     "norm/atr": datasets.Value("string"),
+                                     "string": datasets.Value("string"),
+                                 },
+                                 "node": [
+                                     {
+                                         "refid": datasets.Value("string"),
+                                         "role": datasets.Value("string"),
+                                     }
+                                 ],
+                             }
+                         ],
+                     },
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         if self.config.data_dir is None:
+             raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
+         else:
+             data_dir = self.config.data_dir
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "bioCFiles", "BRONCO150.xml"),
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, "r") as fp:
+             data = biocxml.load(fp).documents
+
+         if self.config.schema == "source":
+             for uid, doc in enumerate(data):
+                 out = {
+                     "id": doc.id,
+                     "passage": {
+                         "offset": doc.passages[0].offset,
+                         "text": doc.passages[0].text,
+                         "annotation": [],
+                         "relation": [],
+                     },
+                 }
+
+                 # handle entities
+                 for annotation in doc.passages[0].annotations:
+                     anno = {
+                         "id": annotation.id,
+                         "infon": annotation.infons,
+                         "text": annotation.text,
+                         "location": [],
+                     }
+                     for location in annotation.locations:
+                         anno["location"].append(
+                             {
+                                 "offset": location.offset,
+                                 "length": location.length,
+                             }
+                         )
+                     out["passage"]["annotation"].append(anno)
+
+                 # handle relations
+                 for relation in doc.passages[0].relations:
+                     rel = {
+                         "id": relation.id,
+                         "node": [],
+                     }
+
+                     # relation.infons has different keys depending on the relation type
+                     # these must be unified to comply with a fixed schema
+                     if relation.infons["type"] == "Normalization":
+                         rel["infon"] = {
+                             "file": relation.infons["file"],
+                             "type": relation.infons["type"],
+                             "norm/atr": relation.infons["normalization type"],
+                             "string": relation.infons["string"],
+                         }
+                     else:
+                         rel["infon"] = {
+                             "file": relation.infons["file"],
+                             "type": relation.infons["type"],
+                             "norm/atr": relation.infons["attribute type"],
+                             "string": "",
+                         }
+
+                     for node in relation.nodes:
+                         rel["node"].append(
+                             {
+                                 "refid": node.refid,
+                                 "role": node.role,
+                             }
+                         )
+
+                     out["passage"]["relation"].append(rel)
+
+                 yield uid, out
+
+         elif self.config.schema == "bigbio_kb":
+             # reorder the documents so they appear in increasing order
+             ordered_data = [data[2], data[4], data[0], data[3], data[1]]
+             for uid, doc in enumerate(ordered_data):
+                 out = {
+                     "id": uid,
+                     "document_id": doc.id,
+                     "passages": [],
+                     "entities": [],
+                     "events": [],
+                     "coreferences": [],
+                     "relations": [],
+                 }
+
+                 # catch all normalized entities for lookup
+                 norm_map = {}
+                 for rel in doc.passages[0].relations:
+                     if rel.infons["type"] == "Normalization":
+                         norm_map[rel.nodes[0].role] = rel.nodes[0].refid
+
+                 # handle passages - split text into sentences
+                 for i, passage in enumerate(doc.passages[0].text.split("\n")):
+                     # match the offsets on the text after removing \n
+                     if i == 0:
+                         marker = 0
+                     else:
+                         marker = out["passages"][-1]["offsets"][-1][-1] + 1
+
+                     out["passages"].append(
+                         {
+                             "id": f"{uid}-{i}",
+                             "text": [passage],
+                             "type": "sentence",
+                             "offsets": [[marker, marker + len(passage)]],
+                         }
+                     )
+
+                 # handle entities
+                 for ent in doc.passages[0].annotations:
+                     offsets = []
+                     text_s = []
+                     for loc in ent.locations:
+                         offsets.append([loc.offset, loc.offset + loc.length])
+                         text_s.append(doc.passages[0].text[loc.offset : loc.offset + loc.length])
+
+                     out["entities"].append(
+                         {
+                             "id": f"{uid}-{ent.id}",
+                             "type": ent.infons["type"],
+                             "text": text_s,
+                             "offsets": offsets,
+                             "normalized": [
+                                 {
+                                     "db_name": norm_map.get(ent.id, ":").split(":")[0],
+                                     # replace faulty connectors in db_ids
+                                     "db_id": norm_map.get(ent.id, ":")
+                                     .split(":")[1]
+                                     .replace(",", ".")
+                                     .replace("+", ""),
+                                 }
+                             ],
+                         }
+                     )
+
+                 yield uid, out
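
For reference, a minimal usage sketch of this loader. The hub repo id below is illustrative (not confirmed by this commit), and the data_dir layout follows _split_generators, which expects bioCFiles/BRONCO150.xml inside the directory you pass:

from datasets import load_dataset

# BRONCO150 is a local dataset: the BioC XML must be obtained under its data use
# agreement and supplied via data_dir; the script does not download anything.
ds = load_dataset(
    "bigbio/bronco",                  # illustrative repo id hosting this script
    name="bronco_bigbio_kb",          # or "bronco_source" for the source schema
    data_dir="/path/to/BRONCO150",    # must contain bioCFiles/BRONCO150.xml
    # trust_remote_code=True,         # may be required on newer `datasets` versions
)
print(ds["train"][0]["document_id"])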