gabrielaltay committed
Commit aef7406
1 Parent(s): 3ff7e19

update bigbiohub with parsers (#1)


- update bigbiohub with parsers (db5934ed392bf70930242a0fcea468a8cc6916d0)

Files changed (1):
  bigbiohub.py +399 -0
bigbiohub.py CHANGED
@@ -3,6 +3,16 @@ from enum import Enum
 import datasets
 from types import SimpleNamespace
 
+# imports needed by the parsing helpers added below
+import logging
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import bioc
+
+logger = logging.getLogger(__name__)
+
 
 BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
 
@@ -151,3 +157,396 @@ kb_features = datasets.Features(
         ],
     }
 )
+
+
+def get_texts_and_offsets_from_bioc_ann(ann: bioc.BioCAnnotation) -> Tuple:
+
+    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+
+    text = ann.text
+
+    if len(offsets) > 1:
+        i = 0
+        texts = []
+        for start, end in offsets:
+            chunk_len = end - start
+            texts.append(text[i : chunk_len + i])
+            i += chunk_len
+            while i < len(text) and text[i] == " ":
+                i += 1
+    else:
+        texts = [text]
+
+    return offsets, texts
+
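+# Example (hypothetical values): for a discontiguous annotation with
+# ann.text == "acute failure" and locations (offset=0, length=5) and
+# (offset=12, length=7), this returns offsets == [(0, 5), (12, 19)]
+# and texts == ["acute", "failure"].
+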
+
+def remove_prefix(a: str, prefix: str) -> str:
+    if a.startswith(prefix):
+        a = a[len(prefix) :]
+    return a
+
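+# e.g. (hypothetical input): remove_prefix("Disease 0 8", "Disease ") == "0 8"
+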
+
+def parse_brat_file(
+    txt_file: Path,
+    annotation_file_suffixes: List[str] = None,
+    parse_notes: bool = False,
+) -> Dict:
+    """
+    Parse a brat file into the schema defined below.
+    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
+    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
+    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
+    Will include annotator notes when `parse_notes == True`.
+    brat_features = datasets.Features(
+        {
+            "id": datasets.Value("string"),
+            "document_id": datasets.Value("string"),
+            "text": datasets.Value("string"),
+            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                {
+                    "offsets": datasets.Sequence([datasets.Value("int32")]),
+                    "text": datasets.Sequence(datasets.Value("string")),
+                    "type": datasets.Value("string"),
+                    "id": datasets.Value("string"),
+                }
+            ],
+            "events": [  # E line in brat
+                {
+                    "trigger": datasets.Value(
+                        "string"
+                    ),  # refers to the text_bound_annotation of the trigger
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "arguments": datasets.Sequence(
+                        {
+                            "role": datasets.Value("string"),
+                            "ref_id": datasets.Value("string"),
+                        }
+                    ),
+                }
+            ],
+            "relations": [  # R line in brat
+                {
+                    "id": datasets.Value("string"),
+                    "head": {
+                        "ref_id": datasets.Value("string"),
+                        "role": datasets.Value("string"),
+                    },
+                    "tail": {
+                        "ref_id": datasets.Value("string"),
+                        "role": datasets.Value("string"),
+                    },
+                    "type": datasets.Value("string"),
+                }
+            ],
+            "equivalences": [  # Equiv line in brat
+                {
+                    "id": datasets.Value("string"),
+                    "ref_ids": datasets.Sequence(datasets.Value("string")),
+                }
+            ],
+            "attributes": [  # M or A lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "value": datasets.Value("string"),
+                }
+            ],
+            "normalizations": [  # N lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "resource_name": datasets.Value(
+                        "string"
+                    ),  # name of the resource, e.g. "Wikipedia"
+                    "cuid": datasets.Value(
+                        "string"
+                    ),  # ID in the resource, e.g. 534366
+                    "text": datasets.Value(
+                        "string"
+                    ),  # human-readable description/name of the entity, e.g. "Barack Obama"
+                }
+            ],
+            ### OPTIONAL: only included when `parse_notes == True`
+            "notes": [  # '#' lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                }
+            ],
+        },
+    )
+    """
+
+    example = {}
+    example["document_id"] = txt_file.with_suffix("").name
+    with txt_file.open() as f:
+        example["text"] = f.read()
+
+    # If no specific suffixes for the to-be-read annotation files are given,
+    # take the standard suffixes for event extraction
+    if annotation_file_suffixes is None:
+        annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+    if len(annotation_file_suffixes) == 0:
+        raise AssertionError(
+            "At least one suffix for the to-be-read annotation files should be given!"
+        )
+
+    ann_lines = []
+    for suffix in annotation_file_suffixes:
+        annotation_file = txt_file.with_suffix(suffix)
+        if annotation_file.exists():
+            with annotation_file.open() as f:
+                ann_lines.extend(f.readlines())
+
+    example["text_bound_annotations"] = []
+    example["events"] = []
+    example["relations"] = []
+    example["equivalences"] = []
+    example["attributes"] = []
+    example["normalizations"] = []
+
+    if parse_notes:
+        example["notes"] = []
+
+    for line in ann_lines:
+        line = line.strip()
+        if not line:
+            continue
+
+        if line.startswith("T"):  # Text bound
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["type"] = fields[1].split()[0]
+            ann["offsets"] = []
+            span_str = remove_prefix(fields[1], (ann["type"] + " "))
+            text = fields[2]
+            for span in span_str.split(";"):
+                start, end = span.split()
+                ann["offsets"].append([int(start), int(end)])
+
+            # Heuristically split text of discontiguous entities into chunks
+            ann["text"] = []
+            if len(ann["offsets"]) > 1:
+                i = 0
+                for start, end in ann["offsets"]:
+                    chunk_len = end - start
+                    ann["text"].append(text[i : chunk_len + i])
+                    i += chunk_len
+                    while i < len(text) and text[i] == " ":
+                        i += 1
+            else:
+                ann["text"] = [text]
+
+            example["text_bound_annotations"].append(ann)
+
+        elif line.startswith("E"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+
+            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+            ann["arguments"] = []
+            for role_ref_id in fields[1].split()[1:]:
+                argument = {
+                    "role": (role_ref_id.split(":"))[0],
+                    "ref_id": (role_ref_id.split(":"))[1],
+                }
+                ann["arguments"].append(argument)
+
+            example["events"].append(ann)
+
+        elif line.startswith("R"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["type"] = fields[1].split()[0]
+
+            ann["head"] = {
+                "role": fields[1].split()[1].split(":")[0],
+                "ref_id": fields[1].split()[1].split(":")[1],
+            }
+            ann["tail"] = {
+                "role": fields[1].split()[2].split(":")[0],
+                "ref_id": fields[1].split()[2].split(":")[1],
+            }
+
+            example["relations"].append(ann)
+
+        # '*' seems to be the legacy way to mark equivalences,
+        # but I couldn't find any info on the current way;
+        # this might have to be adapted depending on the brat version
+        # used for the annotation
+        elif line.startswith("*"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["ref_ids"] = fields[1].split()[1:]
+
+            example["equivalences"].append(ann)
+
+        elif line.startswith("A") or line.startswith("M"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+
+            info = fields[1].split()
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+
+            if len(info) > 2:
+                ann["value"] = info[2]
+            else:
+                ann["value"] = ""
+
+            example["attributes"].append(ann)
+
+        elif line.startswith("N"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["text"] = fields[2]
+
+            info = fields[1].split()
+
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+            ann["resource_name"] = info[2].split(":")[0]
+            ann["cuid"] = info[2].split(":")[1]
+            example["normalizations"].append(ann)
+
+        elif parse_notes and line.startswith("#"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+
+            info = fields[1].split()
+
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+            example["notes"].append(ann)
+
+    return example
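+
+# For the hypothetical .ann lines above, parse_brat_file(Path("data/1234.txt"))
+# would return a dict with "document_id" == "1234" and "text_bound_annotations"
+# == [{"id": "T1", "type": "Disease", "offsets": [[0, 8]], "text": ["melanoma"]}].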
+
+
+def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+    """
+    Transform a brat parse (conforming to the standard brat schema) obtained with
+    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
+    :param brat_parse:
+    """
+
+    unified_example = {}
+
+    # Prefix all ids with document id to ensure global uniqueness,
+    # because brat ids are only unique within their document
+    id_prefix = brat_parse["document_id"] + "_"
+
+    # identical
+    unified_example["document_id"] = brat_parse["document_id"]
+    unified_example["passages"] = [
+        {
+            "id": id_prefix + "_text",
+            "type": "abstract",
+            "text": [brat_parse["text"]],
+            "offsets": [[0, len(brat_parse["text"])]],
+        }
+    ]
+
+    # get normalizations
+    ref_id_to_normalizations = defaultdict(list)
+    for normalization in brat_parse["normalizations"]:
+        ref_id_to_normalizations[normalization["ref_id"]].append(
+            {
+                "db_name": normalization["resource_name"],
+                "db_id": normalization["cuid"],
+            }
+        )
+
+    # separate entities and event triggers
+    unified_example["events"] = []
+    non_event_ann = brat_parse["text_bound_annotations"].copy()
+    for event in brat_parse["events"]:
+        event = event.copy()
+        event["id"] = id_prefix + event["id"]
+        trigger = next(
+            tr
+            for tr in brat_parse["text_bound_annotations"]
+            if tr["id"] == event["trigger"]
+        )
+        if trigger in non_event_ann:
+            non_event_ann.remove(trigger)
+        event["trigger"] = {
+            "text": trigger["text"].copy(),
+            "offsets": trigger["offsets"].copy(),
+        }
+        for argument in event["arguments"]:
+            argument["ref_id"] = id_prefix + argument["ref_id"]
+
+        unified_example["events"].append(event)
+
+    unified_example["entities"] = []
+    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+    for ann in non_event_ann:
+        entity_ann = ann.copy()
+        entity_ann["id"] = id_prefix + entity_ann["id"]
+        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+        unified_example["entities"].append(entity_ann)
+
+    # massage relations
+    unified_example["relations"] = []
+    skipped_relations = set()
+    for ann in brat_parse["relations"]:
+        if (
+            ann["head"]["ref_id"] not in anno_ids
+            or ann["tail"]["ref_id"] not in anno_ids
+        ):
+            skipped_relations.add(ann["id"])
+            continue
+        unified_example["relations"].append(
+            {
+                "arg1_id": id_prefix + ann["head"]["ref_id"],
+                "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                "id": id_prefix + ann["id"],
+                "type": ann["type"],
+                "normalized": [],
+            }
+        )
+    if len(skipped_relations) > 0:
+        example_id = brat_parse["document_id"]
+        logger.info(
+            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+            f" Skip (for now): "
+            f"{list(skipped_relations)}"
+        )
+
+    # get coreferences
+    unified_example["coreferences"] = []
+    for i, ann in enumerate(brat_parse["equivalences"], start=1):
+        is_entity_cluster = True
+        for ref_id in ann["ref_ids"]:
+            if not ref_id.startswith("T"):  # not text-bound -> no entity
+                is_entity_cluster = False
+            elif ref_id not in anno_ids:  # event trigger -> no entity
+                is_entity_cluster = False
+        if is_entity_cluster:
+            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+            unified_example["coreferences"].append(
+                {"id": id_prefix + str(i), "entity_ids": entity_ids}
+            )
+    return unified_example
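
A minimal end-to-end sketch of how the two parsers added in this commit compose (paths and file names are hypothetical, and it assumes bigbiohub.py is importable on your path):

    from pathlib import Path
    from bigbiohub import parse_brat_file, brat_parse_to_bigbio_kb

    # parse a brat .txt/.ann pair, then convert it to the bigbio-kb schema
    brat_example = parse_brat_file(Path("data/1234.txt"), parse_notes=True)
    kb_example = brat_parse_to_bigbio_kb(brat_example)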