masaenger committed
Commit 103e6fd (1 parent: 9fd6597)

Update grascco based on git version 6ab4752

Files changed (3)
  1. README.md +47 -0
  2. bigbiohub.py +590 -0
  3. grascco.py +228 -0
README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ language:
+ - de
+ bigbio_language:
+ - German
+ license: cc-by-4.0
+ bigbio_license_shortname: CC_BY_4p0
+ multilinguality: monolingual
+ pretty_name: GraSCCo
+ homepage: https://zenodo.org/records/6539131
+ bigbio_pubmed: false
+ bigbio_public: true
+ bigbio_tasks:
+ - NAMED_ENTITY_RECOGNITION
+ ---
+
+
+ # Dataset Card for GraSCCo
+
+ ## Dataset Description
+
+ - **Homepage:** https://zenodo.org/records/6539131
+ - **Pubmed:** False
+ - **Public:** True
+ - **Tasks:** NER
+
+ GraSCCo is a collection of artificially generated semi-structured and unstructured German-language clinical summaries. These summaries are formulated as letters from the hospital to the patient's GP after in-patient or out-patient care.
+ This is common practice in Germany, Austria and Switzerland.
+
+ The creation of the GraSCCo documents was inspired by existing clinical texts, but all names and dates are purely fictional.
+ There is no relation to existing patients, clinicians or institutions. While the texts aim to represent the range of German clinical language as well as possible, medical plausibility should not be assumed.
+
+ GraSCCo can therefore only be used to train clinical language models, not clinical domain models.
+
+
+ ## Citation Information
+
+ ```
+ @incollection{modersohn2022grascco,
+   title={GRASCCO—The First Publicly Shareable, Multiply-Alienated German Clinical Text Corpus},
+   author={Modersohn, Luise and Schulz, Stefan and Lohr, Christina and Hahn, Udo},
+   booktitle={German Medical Data Sciences 2022--Future Medicine: More Precise, More Integrative, More Sustainable!},
+   pages={66--72},
+   year={2022},
+   publisher={IOS Press}
+ }
+ ```
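
Once this commit is live, the loader can be tried out with 🤗 Datasets. A minimal sketch, assuming the dataset is hosted under the repo id `bigbio/grascco` (recent `datasets` releases also require `trust_remote_code=True` for script-based datasets like this one):

```python
from datasets import load_dataset

# Config name matches the BUILDER_CONFIGS declared in grascco.py below.
ds = load_dataset(
    "bigbio/grascco",
    name="grascco_phi_bigbio_kb",
    trust_remote_code=True,
)

example = ds["train"][0]
print(example["document_id"], len(example["entities"]))
```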
bigbiohub.py ADDED
@@ -0,0 +1,590 @@
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from enum import Enum
+ import logging
+ from pathlib import Path
+ from types import SimpleNamespace
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
+
+ import datasets
+
+ if TYPE_CHECKING:
+     import bioc
+
+ logger = logging.getLogger(__name__)
+
+
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
+
+
+ @dataclass
+ class BigBioConfig(datasets.BuilderConfig):
+     """BuilderConfig for BigBio."""
+
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+
+
+ class Tasks(Enum):
+     NAMED_ENTITY_RECOGNITION = "NER"
+     NAMED_ENTITY_DISAMBIGUATION = "NED"
+     EVENT_EXTRACTION = "EE"
+     RELATION_EXTRACTION = "RE"
+     COREFERENCE_RESOLUTION = "COREF"
+     QUESTION_ANSWERING = "QA"
+     TEXTUAL_ENTAILMENT = "TE"
+     SEMANTIC_SIMILARITY = "STS"
+     TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
+     PARAPHRASING = "PARA"
+     TRANSLATION = "TRANSL"
+     SUMMARIZATION = "SUM"
+     TEXT_CLASSIFICATION = "TXTCLASS"
+
+
+ entailment_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "premise": datasets.Value("string"),
+         "hypothesis": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ pairs_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ qa_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "question_id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "question": datasets.Value("string"),
+         "type": datasets.Value("string"),
+         "choices": [datasets.Value("string")],
+         "context": datasets.Value("string"),
+         "answer": datasets.Sequence(datasets.Value("string")),
+     }
+ )
+
+ text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text": datasets.Value("string"),
+         "labels": [datasets.Value("string")],
+     }
+ )
+
+ text2text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "text_1_name": datasets.Value("string"),
+         "text_2_name": datasets.Value("string"),
+     }
+ )
+
+ kb_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "passages": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+             }
+         ],
+         "entities": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "events": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 # refers to the text_bound_annotation of the trigger
+                 "trigger": {
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 },
+                 "arguments": [
+                     {
+                         "role": datasets.Value("string"),
+                         "ref_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "coreferences": [
+             {
+                 "id": datasets.Value("string"),
+                 "entity_ids": datasets.Sequence(datasets.Value("string")),
+             }
+         ],
+         "relations": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "arg1_id": datasets.Value("string"),
+                 "arg2_id": datasets.Value("string"),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+     }
+ )
+
+
+ TASK_TO_SCHEMA = {
+     Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
+     Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
+     Tasks.EVENT_EXTRACTION.name: "KB",
+     Tasks.RELATION_EXTRACTION.name: "KB",
+     Tasks.COREFERENCE_RESOLUTION.name: "KB",
+     Tasks.QUESTION_ANSWERING.name: "QA",
+     Tasks.TEXTUAL_ENTAILMENT.name: "TE",
+     Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
+     Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
+     Tasks.PARAPHRASING.name: "T2T",
+     Tasks.TRANSLATION.name: "T2T",
+     Tasks.SUMMARIZATION.name: "T2T",
+     Tasks.TEXT_CLASSIFICATION.name: "TEXT",
+ }
+
+ SCHEMA_TO_TASKS = defaultdict(set)
+ for task, schema in TASK_TO_SCHEMA.items():
+     SCHEMA_TO_TASKS[schema].add(task)
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
+
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
+
+ SCHEMA_TO_FEATURES = {
+     "KB": kb_features,
+     "QA": qa_features,
+     "TE": entailment_features,
+     "T2T": text2text_features,
+     "TEXT": text_features,
+     "PAIRS": pairs_features,
+ }
+
+
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
+
+     offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+
+     text = ann.text
+
+     if len(offsets) > 1:
+         i = 0
+         texts = []
+         for start, end in offsets:
+             chunk_len = end - start
+             texts.append(text[i : chunk_len + i])
+             i += chunk_len
+             while i < len(text) and text[i] == " ":
+                 i += 1
+     else:
+         texts = [text]
+
+     return offsets, texts
+
+
+ def remove_prefix(a: str, prefix: str) -> str:
+     if a.startswith(prefix):
+         a = a[len(prefix) :]
+     return a
+
+
+ def parse_brat_file(
+     txt_file: Path,
+     annotation_file_suffixes: List[str] = None,
+     parse_notes: bool = False,
+ ) -> Dict:
+     """
+     Parse a brat file into the schema defined below.
+     `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
+     Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
+     e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
+     Will include annotator notes, when `parse_notes == True`.
+     brat_features = datasets.Features(
+         {
+             "id": datasets.Value("string"),
+             "document_id": datasets.Value("string"),
+             "text": datasets.Value("string"),
+             "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                 {
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "type": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ],
+             "events": [  # E line in brat
+                 {
+                     "trigger": datasets.Value(
+                         "string"
+                     ),  # refers to the text_bound_annotation of the trigger,
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "arguments": datasets.Sequence(
+                         {
+                             "role": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             ],
+             "relations": [  # R line in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "head": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "tail": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "type": datasets.Value("string"),
+                 }
+             ],
+             "equivalences": [  # Equiv line in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "ref_ids": datasets.Sequence(datasets.Value("string")),
+                 }
+             ],
+             "attributes": [  # M or A lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "value": datasets.Value("string"),
+                 }
+             ],
+             "normalizations": [  # N lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "resource_name": datasets.Value(
+                         "string"
+                     ),  # Name of the resource, e.g. "Wikipedia"
+                     "cuid": datasets.Value(
+                         "string"
+                     ),  # ID in the resource, e.g. 534366
+                     "text": datasets.Value(
+                         "string"
+                     ),  # Human readable description/name of the entity, e.g. "Barack Obama"
+                 }
+             ],
+             ### OPTIONAL: Only included when `parse_notes == True`
+             "notes": [  # # lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ],
+         },
+     )
+     """
+
+     example = {}
+     example["document_id"] = txt_file.with_suffix("").name
+     with txt_file.open() as f:
+         example["text"] = f.read()
+
+     # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
+     # for event extraction
+     if annotation_file_suffixes is None:
+         annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+     if len(annotation_file_suffixes) == 0:
+         raise AssertionError(
+             "At least one suffix for the to-be-read annotation files should be given!"
+         )
+
+     ann_lines = []
+     for suffix in annotation_file_suffixes:
+         annotation_file = txt_file.with_suffix(suffix)
+         if annotation_file.exists():
+             with annotation_file.open() as f:
+                 ann_lines.extend(f.readlines())
+
+     example["text_bound_annotations"] = []
+     example["events"] = []
+     example["relations"] = []
+     example["equivalences"] = []
+     example["attributes"] = []
+     example["normalizations"] = []
+
+     if parse_notes:
+         example["notes"] = []
+
+     for line in ann_lines:
+         line = line.strip()
+         if not line:
+             continue
+
+         if line.startswith("T"):  # Text bound
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+             ann["offsets"] = []
+             span_str = remove_prefix(fields[1], (ann["type"] + " "))
+             text = fields[2]
+             for span in span_str.split(";"):
+                 start, end = span.split()
+                 ann["offsets"].append([int(start), int(end)])
+
+             # Heuristically split text of discontiguous entities into chunks
+             ann["text"] = []
+             if len(ann["offsets"]) > 1:
+                 i = 0
+                 for start, end in ann["offsets"]:
+                     chunk_len = end - start
+                     ann["text"].append(text[i : chunk_len + i])
+                     i += chunk_len
+                     while i < len(text) and text[i] == " ":
+                         i += 1
+             else:
+                 ann["text"] = [text]
+
+             example["text_bound_annotations"].append(ann)
+
+         elif line.startswith("E"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+
+             ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+             ann["arguments"] = []
+             for role_ref_id in fields[1].split()[1:]:
+                 argument = {
+                     "role": (role_ref_id.split(":"))[0],
+                     "ref_id": (role_ref_id.split(":"))[1],
+                 }
+                 ann["arguments"].append(argument)
+
+             example["events"].append(ann)
+
+         elif line.startswith("R"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+
+             ann["head"] = {
+                 "role": fields[1].split()[1].split(":")[0],
+                 "ref_id": fields[1].split()[1].split(":")[1],
+             }
+             ann["tail"] = {
+                 "role": fields[1].split()[2].split(":")[0],
+                 "ref_id": fields[1].split()[2].split(":")[1],
+             }
+
+             example["relations"].append(ann)
+
+         # '*' seems to be the legacy way to mark equivalences,
+         # but I couldn't find any info on the current way
+         # this might have to be adapted dependent on the brat version
+         # of the annotation
+         elif line.startswith("*"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["ref_ids"] = fields[1].split()[1:]
+
+             example["equivalences"].append(ann)
+
+         elif line.startswith("A") or line.startswith("M"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+
+             info = fields[1].split()
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+
+             if len(info) > 2:
+                 ann["value"] = info[2]
+             else:
+                 ann["value"] = ""
+
+             example["attributes"].append(ann)
+
+         elif line.startswith("N"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["text"] = fields[2]
+
+             info = fields[1].split()
+
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             ann["resource_name"] = info[2].split(":")[0]
+             ann["cuid"] = info[2].split(":")[1]
+             example["normalizations"].append(ann)
+
+         elif parse_notes and line.startswith("#"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+
+             info = fields[1].split()
+
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             example["notes"].append(ann)
+
+     return example
+
+
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+     """
+     Transform a brat parse (conforming to the standard brat schema) obtained with
+     `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
+     :param brat_parse:
+     """
+
+     unified_example = {}
+
+     # Prefix all ids with document id to ensure global uniqueness,
+     # because brat ids are only unique within their document
+     id_prefix = brat_parse["document_id"] + "_"
+
+     # identical
+     unified_example["document_id"] = brat_parse["document_id"]
+     unified_example["passages"] = [
+         {
+             "id": id_prefix + "_text",
+             "type": "abstract",
+             "text": [brat_parse["text"]],
+             "offsets": [[0, len(brat_parse["text"])]],
+         }
+     ]
+
+     # get normalizations
+     ref_id_to_normalizations = defaultdict(list)
+     for normalization in brat_parse["normalizations"]:
+         ref_id_to_normalizations[normalization["ref_id"]].append(
+             {
+                 "db_name": normalization["resource_name"],
+                 "db_id": normalization["cuid"],
+             }
+         )
+
+     # separate entities and event triggers
+     unified_example["events"] = []
+     non_event_ann = brat_parse["text_bound_annotations"].copy()
+     for event in brat_parse["events"]:
+         event = event.copy()
+         event["id"] = id_prefix + event["id"]
+         trigger = next(
+             tr
+             for tr in brat_parse["text_bound_annotations"]
+             if tr["id"] == event["trigger"]
+         )
+         if trigger in non_event_ann:
+             non_event_ann.remove(trigger)
+         event["trigger"] = {
+             "text": trigger["text"].copy(),
+             "offsets": trigger["offsets"].copy(),
+         }
+         for argument in event["arguments"]:
+             argument["ref_id"] = id_prefix + argument["ref_id"]
+
+         unified_example["events"].append(event)
+
+     unified_example["entities"] = []
+     anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+     for ann in non_event_ann:
+         entity_ann = ann.copy()
+         entity_ann["id"] = id_prefix + entity_ann["id"]
+         entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+         unified_example["entities"].append(entity_ann)
+
+     # massage relations
+     unified_example["relations"] = []
+     skipped_relations = set()
+     for ann in brat_parse["relations"]:
+         if (
+             ann["head"]["ref_id"] not in anno_ids
+             or ann["tail"]["ref_id"] not in anno_ids
+         ):
+             skipped_relations.add(ann["id"])
+             continue
+         unified_example["relations"].append(
+             {
+                 "arg1_id": id_prefix + ann["head"]["ref_id"],
+                 "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                 "id": id_prefix + ann["id"],
+                 "type": ann["type"],
+                 "normalized": [],
+             }
+         )
+     if len(skipped_relations) > 0:
+         example_id = brat_parse["document_id"]
+         logger.info(
+             f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+             f" Skip (for now): "
+             f"{list(skipped_relations)}"
+         )
+
+     # get coreferences
+     unified_example["coreferences"] = []
+     for i, ann in enumerate(brat_parse["equivalences"], start=1):
+         is_entity_cluster = True
+         for ref_id in ann["ref_ids"]:
+             if not ref_id.startswith("T"):  # not textbound -> no entity
+                 is_entity_cluster = False
+             elif ref_id not in anno_ids:  # event trigger -> no entity
+                 is_entity_cluster = False
+         if is_entity_cluster:
+             entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+             unified_example["coreferences"].append(
+                 {"id": id_prefix + str(i), "entity_ids": entity_ids}
+             )
+     return unified_example
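
The two brat helpers above are designed to chain: `parse_brat_file` reads a document into the intermediate brat schema, and `brat_parse_to_bigbio_kb` lifts that parse into the `bigbio_kb` schema. A minimal sketch, assuming a hypothetical brat pair `data/1234.txt` / `data/1234.ann` on disk and `bigbiohub.py` importable on the path:

```python
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

# Reads data/1234.txt plus any of the .a1/.a2/.ann siblings;
# parse_notes=True also collects "#" annotator-note lines.
brat = parse_brat_file(Path("data/1234.txt"), parse_notes=True)

# All ids get the document id as prefix for global uniqueness.
kb = brat_parse_to_bigbio_kb(brat)
print(kb["document_id"], len(kb["entities"]), len(kb["relations"]))
```

Note that GraSCCo itself ships UIMA CAS JSON rather than brat, so grascco.py below does not use these two helpers; they belong to the shared BigBio hub module.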
grascco.py ADDED
@@ -0,0 +1,228 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ GraSCCo is a collection of artificially generated semi-structured and unstructured German-language clinical summaries.
+ These summaries are formulated as letters from the hospital to the patient's GP after in-patient or out-patient care.
+ This is common practice in Germany, Austria and Switzerland.
+
+ The creation of the GraSCCo documents was inspired by existing clinical texts,
+ but all names and dates are purely fictional.
+ There is no relation to existing patients, clinicians or institutions.
+ While the texts aim to represent the range of German clinical language as well as possible,
+ medical plausibility should not be assumed.
+
+ GraSCCo can therefore only be used to train clinical language models, not clinical domain models.
+ """
+
+ import json
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import BigBioConfig, Tasks, kb_features, logger
+
+ _LOCAL = False
+
+ _CITATION = """\
+ @incollection{modersohn2022grascco,
+   title={GRASCCO—The First Publicly Shareable, Multiply-Alienated German Clinical Text Corpus},
+   author={Modersohn, Luise and Schulz, Stefan and Lohr, Christina and Hahn, Udo},
+   booktitle={German Medical Data Sciences 2022--Future Medicine: More Precise, More Integrative, More Sustainable!},
+   pages={66--72},
+   year={2022},
+   publisher={IOS Press}
+ }
+ """
+
+ _DATASETNAME = "grascco"
+
+ _DISPLAYNAME = "GraSCCo"
+
+ _DESCRIPTION = """\
+ GraSCCo is a collection of artificially generated semi-structured and unstructured German-language clinical summaries.
+ These summaries are formulated as letters from the hospital to the patient's GP after in-patient or out-patient care.
+ This is common practice in Germany, Austria and Switzerland.
+
+ The creation of the GraSCCo documents was inspired by existing clinical texts,
+ but all names and dates are purely fictional.
+ There is no relation to existing patients, clinicians or institutions.
+ While the texts aim to represent the range of German clinical language as well as possible,
+ medical plausibility should not be assumed.
+
+ GraSCCo can therefore only be used to train clinical language models, not clinical domain models.
+ """
+
+ _HOMEPAGE = "https://zenodo.org/records/6539131"
+
+ _LICENSE = "CC_BY_4p0"
+
+ _LANGUAGES = ["German"]
+
+ _PUBMED = False
+
+ _URLS = {
+     _DATASETNAME: {
+         "phi": "https://zenodo.org/records/11502329/files/grascco_phi_annotation_json.zip?download=1",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+ _UIMA_FEATURES_KEY = "%FEATURE_STRUCTURES"
+
+
+ class GraSCCoDataset(datasets.GeneratorBasedBuilder):
+     """Dataloader for the GraSCCo dataset with different annotation layers (PHI, SNOMED CT, etc.)"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="grascco_phi_source",
+             version=SOURCE_VERSION,
+             description="GraSCCo (PHI) source schema",
+             schema="source",
+             subset_id="phi",
+         ),
+         BigBioConfig(
+             name="grascco_phi_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="GraSCCo (PHI) BigBio schema",
+             schema="bigbio_kb",
+             subset_id="phi",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "grascco_phi_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     _UIMA_FEATURES_KEY: [
+                         {
+                             "%ID": datasets.Value("int64"),
+                             "%TYPE": datasets.Value("string"),
+                             "@sofa": datasets.Value("int64"),
+                             "@layer": datasets.Value("int64"),
+                             "begin": datasets.Value("int64"),
+                             "end": datasets.Value("int64"),
+                             "name": datasets.Value("string"),
+                             "uiName": datasets.Value("string"),
+                             "documentTitle": datasets.Value("string"),
+                             "sofaString": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME][self.config.subset_id]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": Path(data_dir) / "grascco_phi_annotation_json",
+                 },
+             ),
+         ]
+
+     def _parse_uima_cas_json(self, filename) -> Dict:
+         """Parse a UIMA CAS JSON file and return the parsed elements as well as the raw data"""
+         with open(filename, "r", encoding="utf-8") as f:
+             uima_features = json.load(f)[_UIMA_FEATURES_KEY]
+         phi_elements = []
+         for feature in uima_features:
+             if feature["%TYPE"] == "webanno.custom.PHI":
+                 phi_elements.append(feature)
+             if feature["%TYPE"] == "de.tudarmstadt.ukp.dkpro.core.api.metadata.type.DocumentMetaData":
+                 document_title = feature["documentTitle"]
+             if feature["%TYPE"] == "uima.cas.Sofa":
+                 document_text = feature["sofaString"]
+         return {
+             "phi_elements": phi_elements,
+             "document_title": document_title,
+             "document_text": document_text,
+             "uima_features": uima_features,
+         }
+
+     def _generate_examples(self, filepath) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         for file_id, file in enumerate(sorted(filepath.glob("*.json"))):
+             uima_parsed = self._parse_uima_cas_json(file)
+             doc_id = uima_parsed["document_title"]
+             if self.config.schema == "source":
+                 yield doc_id, {"document_id": doc_id, _UIMA_FEATURES_KEY: uima_parsed["uima_features"]}
+             elif self.config.schema == "bigbio_kb":
+                 text = uima_parsed["document_text"]
+                 relations = []
+                 entities = []
+                 # Just a single passage; ignoring sentence boundaries from the annotation tool, as these are not reliable
+                 passages = [{"id": f"{file_id}-0", "type": "document", "text": [text], "offsets": [[0, len(text)]]}]
+
+                 # Other subsets / annotation layers will be added in future GraSCCo releases
+                 if self.config.subset_id == "phi":
+                     for phi in sorted(uima_parsed["phi_elements"], key=lambda p: p["begin"]):
+                         e_start = phi["begin"]
+                         e_end = phi["end"]
+                         eid = phi["%ID"]
+                         if "kind" not in phi:
+                             logger.warning(
+                                 f"'kind' attribute missing in PHI element with ID {eid} in document {doc_id}"
+                             )
+                             continue
+                         entities.append(
+                             {
+                                 "id": f"{file_id}-{eid}",
+                                 "type": phi["kind"],
+                                 "text": [text[e_start:e_end]],
+                                 "offsets": [[e_start, e_end]],
+                                 "normalized": [],
+                             }
+                         )
+
+                 yield doc_id, {
+                     "id": file_id,
+                     "document_id": doc_id,
+                     "passages": passages,
+                     "entities": entities,
+                     "events": [],
+                     "coreferences": [],
+                     "relations": relations,
+                 }
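
For context, `_parse_uima_cas_json` only inspects three feature types in the flat `%FEATURE_STRUCTURES` list: `uima.cas.Sofa` (document text), `de.tudarmstadt.ukp.dkpro.core.api.metadata.type.DocumentMetaData` (document title) and `webanno.custom.PHI` (annotated spans with a `kind` label). A hand-made sketch of the expected input shape; all field values here are invented for illustration, not taken from the corpus:

```python
import json

# Hypothetical, minimal UIMA CAS JSON containing the three feature
# types that grascco.py reads; real files carry many more structures.
cas = {
    "%FEATURE_STRUCTURES": [
        {"%TYPE": "uima.cas.Sofa", "sofaString": "Frau Muster wurde heute entlassen."},
        {
            "%TYPE": "de.tudarmstadt.ukp.dkpro.core.api.metadata.type.DocumentMetaData",
            "documentTitle": "Muster.txt",
        },
        # PHI span covering "Muster" (offsets 5..11), labelled via "kind".
        {"%TYPE": "webanno.custom.PHI", "%ID": 42, "begin": 5, "end": 11, "kind": "Name"},
    ]
}

with open("Muster.json", "w", encoding="utf-8") as f:
    json.dump(cas, f, ensure_ascii=False)
```

A directory of such files, passed as `filepath`, would yield one `bigbio_kb` example per document, with each PHI span surfaced as an entity.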