Datasets:
Languages: English
License: cc-by-4.0
phlobo committed on
Commit 4eacfe5
1 Parent(s): 64ce0f4

Update coneco based on git version 5f49fbe

Files changed (3):
  1. README.md +57 -0
  2. bigbiohub.py +591 -0
  3. coneco.py +209 -0
README.md ADDED
@@ -0,0 +1,57 @@
1
+ ---
2
+ language:
3
+ - en
4
+ bigbio_language:
5
+ - English
6
+ license: cc-by-4.0
7
+ bigbio_license_shortname: CC_BY_4p0
8
+ multilinguality: monolingual
9
+ pretty_name: CoNECo
10
+ homepage: https://zenodo.org/records/11263147
11
+ bigbio_pubmed: false
12
+ bigbio_public: true
13
+ bigbio_tasks:
14
+ - NAMED_ENTITY_RECOGNITION
15
+ - NAMED_ENTITY_DISAMBIGUATION
16
+ paperswithcode_id: coneco
17
+ ---
18
+
19
+
20
+ # Dataset Card for CoNECo
21
+
22
+ ## Dataset Description
23
+
24
+ - **Homepage:** https://zenodo.org/records/11263147
25
+ - **Pubmed:** False
26
+ - **Public:** True
27
+ - **Tasks:** NER, NEN
28
+
29
+ Complex Named Entity Corpus (CoNECo) is an annotated corpus for NER and NEN of protein-containing complexes. CoNECo comprises 1,621 documents with 2,052 entities, 1,976 of which are normalized to Gene Ontology. We divided the corpus into training, development, and test sets.
30
+
31
+ ## Citation Information
32
+
33
+ ```
34
+ @article{10.1093/bioadv/vbae116,
35
+ author = {Nastou, Katerina and Koutrouli, Mikaela and Pyysalo, Sampo and Jensen, Lars Juhl},
36
+ title = "{CoNECo: A Corpus for Named Entity Recognition and Normalization of Protein Complexes}",
37
+ journal = {Bioinformatics Advances},
38
+ pages = {vbae116},
39
+ year = {2024},
40
+ month = {08},
41
+ abstract = "{Despite significant progress in biomedical information extraction, there is a lack of resources \
42
+ for Named Entity Recognition (NER) and Normalization (NEN) of protein-containing complexes. Current resources \
43
+ inadequately address the recognition of protein-containing complex names across different organisms, underscoring \
44
+ the crucial need for a dedicated corpus. We introduce the Complex Named Entity Corpus (CoNECo), an annotated \
45
+ corpus for NER and NEN of complexes. CoNECo comprises 1,621 documents with 2,052 entities, 1,976 of which are \
46
+ normalized to Gene Ontology. We divided the corpus into training, development, and test sets and trained both a \
47
+ transformer-based and dictionary-based tagger on them. Evaluation on the test set demonstrated robust performance, \
48
+ with F-scores of 73.7\\% and 61.2\\%, respectively. Subsequently, we applied the best taggers for comprehensive \
49
+ tagging of the entire openly accessible biomedical literature. All resources, including the annotated corpus, \
50
+ training data, and code, are available to the community through Zenodo https://zenodo.org/records/11263147 and \
51
+ GitHub https://zenodo.org/records/10693653.}",
52
+ issn = {2635-0041},
53
+ doi = {10.1093/bioadv/vbae116},
54
+ url = {https://doi.org/10.1093/bioadv/vbae116},
55
+ eprint = {https://academic.oup.com/bioinformaticsadvances/advance-article-pdf/doi/10.1093/bioadv/vbae116/58869902/vbae116.pdf},
56
+ }
57
+ ```
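
A minimal usage sketch for the card above, assuming the corpus is published under the `bigbio/coneco` repo id (adjust if it differs) and loaded with the Hugging Face `datasets` library:

```python
from datasets import load_dataset

# Repo id below is an assumption; newer `datasets` versions require
# trust_remote_code=True for script-based loaders such as this one.
coneco = load_dataset("bigbio/coneco", name="coneco_bigbio_kb", trust_remote_code=True)

print(coneco)                            # train / validation / test splits
doc = coneco["train"][0]
print(doc["entities"][0]["text"])        # surface form of a complex mention
print(doc["entities"][0]["normalized"])  # Gene Ontology normalization, if any
```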
bigbiohub.py ADDED
@@ -0,0 +1,591 @@
1
+ from collections import defaultdict
2
+ from dataclasses import dataclass
3
+ from enum import Enum
4
+ import logging
5
+ from pathlib import Path
6
+ import re
7
+ from types import SimpleNamespace
8
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
9
+
10
+ import datasets
11
+
12
+ if TYPE_CHECKING:
13
+ import bioc
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
19
+
20
+
21
+ @dataclass
22
+ class BigBioConfig(datasets.BuilderConfig):
23
+ """BuilderConfig for BigBio."""
24
+
25
+ name: str = None
26
+ version: datasets.Version = None
27
+ description: str = None
28
+ schema: str = None
29
+ subset_id: str = None
30
+
31
+
32
+ class Tasks(Enum):
33
+ NAMED_ENTITY_RECOGNITION = "NER"
34
+ NAMED_ENTITY_DISAMBIGUATION = "NED"
35
+ EVENT_EXTRACTION = "EE"
36
+ RELATION_EXTRACTION = "RE"
37
+ COREFERENCE_RESOLUTION = "COREF"
38
+ QUESTION_ANSWERING = "QA"
39
+ TEXTUAL_ENTAILMENT = "TE"
40
+ SEMANTIC_SIMILARITY = "STS"
41
+ TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
42
+ PARAPHRASING = "PARA"
43
+ TRANSLATION = "TRANSL"
44
+ SUMMARIZATION = "SUM"
45
+ TEXT_CLASSIFICATION = "TXTCLASS"
46
+
47
+
48
+ entailment_features = datasets.Features(
49
+ {
50
+ "id": datasets.Value("string"),
51
+ "premise": datasets.Value("string"),
52
+ "hypothesis": datasets.Value("string"),
53
+ "label": datasets.Value("string"),
54
+ }
55
+ )
56
+
57
+ pairs_features = datasets.Features(
58
+ {
59
+ "id": datasets.Value("string"),
60
+ "document_id": datasets.Value("string"),
61
+ "text_1": datasets.Value("string"),
62
+ "text_2": datasets.Value("string"),
63
+ "label": datasets.Value("string"),
64
+ }
65
+ )
66
+
67
+ qa_features = datasets.Features(
68
+ {
69
+ "id": datasets.Value("string"),
70
+ "question_id": datasets.Value("string"),
71
+ "document_id": datasets.Value("string"),
72
+ "question": datasets.Value("string"),
73
+ "type": datasets.Value("string"),
74
+ "choices": [datasets.Value("string")],
75
+ "context": datasets.Value("string"),
76
+ "answer": datasets.Sequence(datasets.Value("string")),
77
+ }
78
+ )
79
+
80
+ text_features = datasets.Features(
81
+ {
82
+ "id": datasets.Value("string"),
83
+ "document_id": datasets.Value("string"),
84
+ "text": datasets.Value("string"),
85
+ "labels": [datasets.Value("string")],
86
+ }
87
+ )
88
+
89
+ text2text_features = datasets.Features(
90
+ {
91
+ "id": datasets.Value("string"),
92
+ "document_id": datasets.Value("string"),
93
+ "text_1": datasets.Value("string"),
94
+ "text_2": datasets.Value("string"),
95
+ "text_1_name": datasets.Value("string"),
96
+ "text_2_name": datasets.Value("string"),
97
+ }
98
+ )
99
+
100
+ kb_features = datasets.Features(
101
+ {
102
+ "id": datasets.Value("string"),
103
+ "document_id": datasets.Value("string"),
104
+ "passages": [
105
+ {
106
+ "id": datasets.Value("string"),
107
+ "type": datasets.Value("string"),
108
+ "text": datasets.Sequence(datasets.Value("string")),
109
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
110
+ }
111
+ ],
112
+ "entities": [
113
+ {
114
+ "id": datasets.Value("string"),
115
+ "type": datasets.Value("string"),
116
+ "text": datasets.Sequence(datasets.Value("string")),
117
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
118
+ "normalized": [
119
+ {
120
+ "db_name": datasets.Value("string"),
121
+ "db_id": datasets.Value("string"),
122
+ }
123
+ ],
124
+ }
125
+ ],
126
+ "events": [
127
+ {
128
+ "id": datasets.Value("string"),
129
+ "type": datasets.Value("string"),
130
+ # refers to the text_bound_annotation of the trigger
131
+ "trigger": {
132
+ "text": datasets.Sequence(datasets.Value("string")),
133
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
134
+ },
135
+ "arguments": [
136
+ {
137
+ "role": datasets.Value("string"),
138
+ "ref_id": datasets.Value("string"),
139
+ }
140
+ ],
141
+ }
142
+ ],
143
+ "coreferences": [
144
+ {
145
+ "id": datasets.Value("string"),
146
+ "entity_ids": datasets.Sequence(datasets.Value("string")),
147
+ }
148
+ ],
149
+ "relations": [
150
+ {
151
+ "id": datasets.Value("string"),
152
+ "type": datasets.Value("string"),
153
+ "arg1_id": datasets.Value("string"),
154
+ "arg2_id": datasets.Value("string"),
155
+ "normalized": [
156
+ {
157
+ "db_name": datasets.Value("string"),
158
+ "db_id": datasets.Value("string"),
159
+ }
160
+ ],
161
+ }
162
+ ],
163
+ }
164
+ )
165
+
166
+
167
+ TASK_TO_SCHEMA = {
168
+ Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
169
+ Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
170
+ Tasks.EVENT_EXTRACTION.name: "KB",
171
+ Tasks.RELATION_EXTRACTION.name: "KB",
172
+ Tasks.COREFERENCE_RESOLUTION.name: "KB",
173
+ Tasks.QUESTION_ANSWERING.name: "QA",
174
+ Tasks.TEXTUAL_ENTAILMENT.name: "TE",
175
+ Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
176
+ Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
177
+ Tasks.PARAPHRASING.name: "T2T",
178
+ Tasks.TRANSLATION.name: "T2T",
179
+ Tasks.SUMMARIZATION.name: "T2T",
180
+ Tasks.TEXT_CLASSIFICATION.name: "TEXT",
181
+ }
182
+
183
+ SCHEMA_TO_TASKS = defaultdict(set)
184
+ for task, schema in TASK_TO_SCHEMA.items():
185
+ SCHEMA_TO_TASKS[schema].add(task)
186
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
187
+
188
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
189
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
190
+
191
+ SCHEMA_TO_FEATURES = {
192
+ "KB": kb_features,
193
+ "QA": qa_features,
194
+ "TE": entailment_features,
195
+ "T2T": text2text_features,
196
+ "TEXT": text_features,
197
+ "PAIRS": pairs_features,
198
+ }
199
+
200
+
201
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
202
+
203
+ offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
204
+
205
+ text = ann.text
206
+
207
+ if len(offsets) > 1:
208
+ i = 0
209
+ texts = []
210
+ for start, end in offsets:
211
+ chunk_len = end - start
212
+ texts.append(text[i : chunk_len + i])
213
+ i += chunk_len
214
+ while i < len(text) and text[i] == " ":
215
+ i += 1
216
+ else:
217
+ texts = [text]
218
+
219
+ return offsets, texts
220
+
221
+
222
+ def remove_prefix(a: str, prefix: str) -> str:
223
+ if a.startswith(prefix):
224
+ a = a[len(prefix) :]
225
+ return a
226
+
227
+
228
+ def parse_brat_file(
229
+ txt_file: Path,
230
+ annotation_file_suffixes: List[str] = None,
231
+ parse_notes: bool = False,
232
+ ) -> Dict:
233
+ """
234
+ Parse a brat file into the schema defined below.
235
+ `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
236
+ Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
237
+ e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
238
+ Will include annotator notes, when `parse_notes == True`.
239
+ brat_features = datasets.Features(
240
+ {
241
+ "id": datasets.Value("string"),
242
+ "document_id": datasets.Value("string"),
243
+ "text": datasets.Value("string"),
244
+ "text_bound_annotations": [ # T line in brat, e.g. type or event trigger
245
+ {
246
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
247
+ "text": datasets.Sequence(datasets.Value("string")),
248
+ "type": datasets.Value("string"),
249
+ "id": datasets.Value("string"),
250
+ }
251
+ ],
252
+ "events": [ # E line in brat
253
+ {
254
+ "trigger": datasets.Value(
255
+ "string"
256
+ ), # refers to the text_bound_annotation of the trigger,
257
+ "id": datasets.Value("string"),
258
+ "type": datasets.Value("string"),
259
+ "arguments": datasets.Sequence(
260
+ {
261
+ "role": datasets.Value("string"),
262
+ "ref_id": datasets.Value("string"),
263
+ }
264
+ ),
265
+ }
266
+ ],
267
+ "relations": [ # R line in brat
268
+ {
269
+ "id": datasets.Value("string"),
270
+ "head": {
271
+ "ref_id": datasets.Value("string"),
272
+ "role": datasets.Value("string"),
273
+ },
274
+ "tail": {
275
+ "ref_id": datasets.Value("string"),
276
+ "role": datasets.Value("string"),
277
+ },
278
+ "type": datasets.Value("string"),
279
+ }
280
+ ],
281
+ "equivalences": [ # Equiv line in brat
282
+ {
283
+ "id": datasets.Value("string"),
284
+ "ref_ids": datasets.Sequence(datasets.Value("string")),
285
+ }
286
+ ],
287
+ "attributes": [ # M or A lines in brat
288
+ {
289
+ "id": datasets.Value("string"),
290
+ "type": datasets.Value("string"),
291
+ "ref_id": datasets.Value("string"),
292
+ "value": datasets.Value("string"),
293
+ }
294
+ ],
295
+ "normalizations": [ # N lines in brat
296
+ {
297
+ "id": datasets.Value("string"),
298
+ "type": datasets.Value("string"),
299
+ "ref_id": datasets.Value("string"),
300
+ "resource_name": datasets.Value(
301
+ "string"
302
+ ), # Name of the resource, e.g. "Wikipedia"
303
+ "cuid": datasets.Value(
304
+ "string"
305
+ ), # ID in the resource, e.g. 534366
306
+ "text": datasets.Value(
307
+ "string"
308
+ ), # Human readable description/name of the entity, e.g. "Barack Obama"
309
+ }
310
+ ],
311
+ ### OPTIONAL: Only included when `parse_notes == True`
312
+ "notes": [ # # lines in brat
313
+ {
314
+ "id": datasets.Value("string"),
315
+ "type": datasets.Value("string"),
316
+ "ref_id": datasets.Value("string"),
317
+ "text": datasets.Value("string"),
318
+ }
319
+ ],
320
+ },
321
+ )
322
+ """
323
+
324
+ example = {}
325
+ example["document_id"] = txt_file.with_suffix("").name
326
+ with txt_file.open() as f:
327
+ example["text"] = f.read()
328
+
329
+ # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
330
+ # for event extraction
331
+ if annotation_file_suffixes is None:
332
+ annotation_file_suffixes = [".a1", ".a2", ".ann"]
333
+
334
+ if len(annotation_file_suffixes) == 0:
335
+ raise AssertionError(
336
+ "At least one suffix for the to-be-read annotation files should be given!"
337
+ )
338
+
339
+ ann_lines = []
340
+ for suffix in annotation_file_suffixes:
341
+ annotation_file = txt_file.with_suffix(suffix)
342
+ if annotation_file.exists():
343
+ with annotation_file.open() as f:
344
+ ann_lines.extend(f.readlines())
345
+
346
+ example["text_bound_annotations"] = []
347
+ example["events"] = []
348
+ example["relations"] = []
349
+ example["equivalences"] = []
350
+ example["attributes"] = []
351
+ example["normalizations"] = []
352
+
353
+ if parse_notes:
354
+ example["notes"] = []
355
+
356
+ for line in ann_lines:
357
+ line = line.strip()
358
+ if not line:
359
+ continue
360
+
361
+ if line.startswith("T"): # Text bound
362
+ ann = {}
363
+ fields = re.split(r"\t+", line)
364
+
365
+ ann["id"] = fields[0]
366
+ ann["type"] = fields[1].split()[0]
367
+ ann["offsets"] = []
368
+ span_str = remove_prefix(fields[1], (ann["type"] + " "))
369
+ text = fields[2]
370
+ for span in span_str.split(";"):
371
+ start, end = span.split()
372
+ ann["offsets"].append([int(start), int(end)])
373
+
374
+ # Heuristically split text of discontiguous entities into chunks
375
+ ann["text"] = []
376
+ if len(ann["offsets"]) > 1:
377
+ i = 0
378
+ for start, end in ann["offsets"]:
379
+ chunk_len = end - start
380
+ ann["text"].append(text[i : chunk_len + i])
381
+ i += chunk_len
382
+ while i < len(text) and text[i] == " ":
383
+ i += 1
384
+ else:
385
+ ann["text"] = [text]
386
+
387
+ example["text_bound_annotations"].append(ann)
388
+
389
+ elif line.startswith("E"):
390
+ ann = {}
391
+ fields = re.split(r"\t+", line)
392
+
393
+ ann["id"] = fields[0]
394
+
395
+ ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
396
+
397
+ ann["arguments"] = []
398
+ for role_ref_id in fields[1].split()[1:]:
399
+ argument = {
400
+ "role": (role_ref_id.split(":"))[0],
401
+ "ref_id": (role_ref_id.split(":"))[1],
402
+ }
403
+ ann["arguments"].append(argument)
404
+
405
+ example["events"].append(ann)
406
+
407
+ elif line.startswith("R"):
408
+ ann = {}
409
+ fields = re.split(r"\t+", line)
410
+
411
+ ann["id"] = fields[0]
412
+ ann["type"] = fields[1].split()[0]
413
+
414
+ ann["head"] = {
415
+ "role": fields[1].split()[1].split(":")[0],
416
+ "ref_id": fields[1].split()[1].split(":")[1],
417
+ }
418
+ ann["tail"] = {
419
+ "role": fields[1].split()[2].split(":")[0],
420
+ "ref_id": fields[1].split()[2].split(":")[1],
421
+ }
422
+
423
+ example["relations"].append(ann)
424
+
425
+ # '*' seems to be the legacy way to mark equivalences,
426
+ # but I couldn't find any info on the current way
427
+ # this might have to be adapted dependent on the brat version
428
+ # of the annotation
429
+ elif line.startswith("*"):
430
+ ann = {}
431
+ fields = re.split(r"\t+", line)
432
+
433
+ ann["id"] = fields[0]
434
+ ann["ref_ids"] = fields[1].split()[1:]
435
+
436
+ example["equivalences"].append(ann)
437
+
438
+ elif line.startswith("A") or line.startswith("M"):
439
+ ann = {}
440
+ fields = re.split(r"\t+", line)
441
+
442
+ ann["id"] = fields[0]
443
+
444
+ info = fields[1].split()
445
+ ann["type"] = info[0]
446
+ ann["ref_id"] = info[1]
447
+
448
+ if len(info) > 2:
449
+ ann["value"] = info[2]
450
+ else:
451
+ ann["value"] = ""
452
+
453
+ example["attributes"].append(ann)
454
+
455
+ elif line.startswith("N"):
456
+ ann = {}
457
+ fields = re.split(r"\t+", line)
458
+
459
+ ann["id"] = fields[0]
460
+ ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
461
+
462
+ info = fields[1].split()
463
+
464
+ ann["type"] = info[0]
465
+ ann["ref_id"] = info[1]
466
+ ann["resource_name"] = info[2].split(":")[0]
467
+ ann["cuid"] = info[2].split(":")[1]
468
+ example["normalizations"].append(ann)
469
+
470
+ elif parse_notes and line.startswith("#"):
471
+ ann = {}
472
+ fields = re.split(r"\t+", line)
473
+
474
+ ann["id"] = fields[0]
475
+ ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
476
+
477
+ info = fields[1].split()
478
+
479
+ ann["type"] = info[0]
480
+ ann["ref_id"] = info[1]
481
+ example["notes"].append(ann)
482
+
483
+ return example
484
+
485
+
486
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
487
+ """
488
+ Transform a brat parse (conforming to the standard brat schema) obtained with
489
+ `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
490
+ :param brat_parse:
491
+ """
492
+
493
+ unified_example = {}
494
+
495
+ # Prefix all ids with document id to ensure global uniqueness,
496
+ # because brat ids are only unique within their document
497
+ id_prefix = brat_parse["document_id"] + "_"
498
+
499
+ # identical
500
+ unified_example["document_id"] = brat_parse["document_id"]
501
+ unified_example["passages"] = [
502
+ {
503
+ "id": id_prefix + "_text",
504
+ "type": "abstract",
505
+ "text": [brat_parse["text"]],
506
+ "offsets": [[0, len(brat_parse["text"])]],
507
+ }
508
+ ]
509
+
510
+ # get normalizations
511
+ ref_id_to_normalizations = defaultdict(list)
512
+ for normalization in brat_parse["normalizations"]:
513
+ ref_id_to_normalizations[normalization["ref_id"]].append(
514
+ {
515
+ "db_name": normalization["resource_name"],
516
+ "db_id": normalization["cuid"],
517
+ }
518
+ )
519
+
520
+ # separate entities and event triggers
521
+ unified_example["events"] = []
522
+ non_event_ann = brat_parse["text_bound_annotations"].copy()
523
+ for event in brat_parse["events"]:
524
+ event = event.copy()
525
+ event["id"] = id_prefix + event["id"]
526
+ trigger = next(
527
+ tr
528
+ for tr in brat_parse["text_bound_annotations"]
529
+ if tr["id"] == event["trigger"]
530
+ )
531
+ if trigger in non_event_ann:
532
+ non_event_ann.remove(trigger)
533
+ event["trigger"] = {
534
+ "text": trigger["text"].copy(),
535
+ "offsets": trigger["offsets"].copy(),
536
+ }
537
+ for argument in event["arguments"]:
538
+ argument["ref_id"] = id_prefix + argument["ref_id"]
539
+
540
+ unified_example["events"].append(event)
541
+
542
+ unified_example["entities"] = []
543
+ anno_ids = [ref_id["id"] for ref_id in non_event_ann]
544
+ for ann in non_event_ann:
545
+ entity_ann = ann.copy()
546
+ entity_ann["id"] = id_prefix + entity_ann["id"]
547
+ entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
548
+ unified_example["entities"].append(entity_ann)
549
+
550
+ # massage relations
551
+ unified_example["relations"] = []
552
+ skipped_relations = set()
553
+ for ann in brat_parse["relations"]:
554
+ if (
555
+ ann["head"]["ref_id"] not in anno_ids
556
+ or ann["tail"]["ref_id"] not in anno_ids
557
+ ):
558
+ skipped_relations.add(ann["id"])
559
+ continue
560
+ unified_example["relations"].append(
561
+ {
562
+ "arg1_id": id_prefix + ann["head"]["ref_id"],
563
+ "arg2_id": id_prefix + ann["tail"]["ref_id"],
564
+ "id": id_prefix + ann["id"],
565
+ "type": ann["type"],
566
+ "normalized": [],
567
+ }
568
+ )
569
+ if len(skipped_relations) > 0:
570
+ example_id = brat_parse["document_id"]
571
+ logger.info(
572
+ f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
573
+ f" Skip (for now): "
574
+ f"{list(skipped_relations)}"
575
+ )
576
+
577
+ # get coreferences
578
+ unified_example["coreferences"] = []
579
+ for i, ann in enumerate(brat_parse["equivalences"], start=1):
580
+ is_entity_cluster = True
581
+ for ref_id in ann["ref_ids"]:
582
+ if not ref_id.startswith("T"): # not textbound -> no entity
583
+ is_entity_cluster = False
584
+ elif ref_id not in anno_ids: # event trigger -> no entity
585
+ is_entity_cluster = False
586
+ if is_entity_cluster:
587
+ entity_ids = [id_prefix + i for i in ann["ref_ids"]]
588
+ unified_example["coreferences"].append(
589
+ {"id": id_prefix + str(i), "entity_ids": entity_ids}
590
+ )
591
+ return unified_example
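
A small sketch of how `parse_brat_file` and `brat_parse_to_bigbio_kb` fit together, assuming `bigbiohub.py` is importable from the working directory; the document text, annotation lines, and GO identifier are invented for illustration:

```python
import tempfile
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

with tempfile.TemporaryDirectory() as tmp:
    txt = Path(tmp) / "12345.txt"
    txt.write_text("The proteasome degrades ubiquitinated proteins.")
    # One text-bound annotation (T1) and its normalization (N1) in brat format.
    (Path(tmp) / "12345.ann").write_text(
        "T1\tComplex 4 14\tproteasome\n"
        "N1\tReference T1 GO:0000502\tproteasome complex\n"
    )

    brat = parse_brat_file(txt)         # raw brat-style dict (text, T/N lines, ...)
    kb = brat_parse_to_bigbio_kb(brat)  # harmonized bigbio_kb dict

print(kb["entities"][0]["text"])        # ['proteasome']
print(kb["entities"][0]["normalized"])  # [{'db_name': 'GO', 'db_id': '0000502'}]
```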
coneco.py ADDED
@@ -0,0 +1,209 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ A dataset loading script for the Complex Named Entity Corpus (CoNECo).
18
+
19
+ CoNECo is an annotated corpus for NER and NEN of protein-containing complexes. \
20
+ CoNECo comprises 1,621 documents with 2,052 entities, 1,976 of which are normalized \
21
+ to Gene Ontology. We divided the corpus into training, development, and test sets.
22
+ """
23
+
24
+ from pathlib import Path
25
+ from typing import Dict, List, Tuple
26
+
27
+ import datasets
28
+
29
+ from .bigbiohub import (BigBioConfig, Tasks, brat_parse_to_bigbio_kb,
30
+ kb_features, parse_brat_file)
31
+
32
+ _LANGUAGES = ["English"]
33
+ _PUBMED = False
34
+ _LOCAL = False
35
+ _CITATION = """\
36
+ @article{10.1093/bioadv/vbae116,
37
+ author = {Nastou, Katerina and Koutrouli, Mikaela and Pyysalo, Sampo and Jensen, Lars Juhl},
38
+ title = "{CoNECo: A Corpus for Named Entity Recognition and Normalization of Protein Complexes}",
39
+ journal = {Bioinformatics Advances},
40
+ pages = {vbae116},
41
+ year = {2024},
42
+ month = {08},
43
+ abstract = "{Despite significant progress in biomedical information extraction, there is a lack of resources \
44
+ for Named Entity Recognition (NER) and Normalization (NEN) of protein-containing complexes. Current resources \
45
+ inadequately address the recognition of protein-containing complex names across different organisms, underscoring \
46
+ the crucial need for a dedicated corpus. We introduce the Complex Named Entity Corpus (CoNECo), an annotated \
47
+ corpus for NER and NEN of complexes. CoNECo comprises 1,621 documents with 2,052 entities, 1,976 of which are \
48
+ normalized to Gene Ontology. We divided the corpus into training, development, and test sets and trained both a \
49
+ transformer-based and dictionary-based tagger on them. Evaluation on the test set demonstrated robust performance, \
50
+ with F-scores of 73.7\\% and 61.2\\%, respectively. Subsequently, we applied the best taggers for comprehensive \
51
+ tagging of the entire openly accessible biomedical literature. All resources, including the annotated corpus, \
52
+ training data, and code, are available to the community through Zenodo https://zenodo.org/records/11263147 and \
53
+ GitHub https://zenodo.org/records/10693653.}",
54
+ issn = {2635-0041},
55
+ doi = {10.1093/bioadv/vbae116},
56
+ url = {https://doi.org/10.1093/bioadv/vbae116},
57
+ eprint = {https://academic.oup.com/bioinformaticsadvances/advance-article-pdf/doi/10.1093/bioadv/vbae116/\
58
+ 58869902/vbae116.pdf},
59
+ }
60
+ """
61
+
62
+ _DATASETNAME = "coneco"
63
+ _DISPLAYNAME = "CoNECo"
64
+
65
+ _DESCRIPTION = """\
66
+ Complex Named Entity Corpus (CoNECo) is an annotated corpus for NER and NEN of protein-containing complexes. \
67
+ CoNECo comprises 1,621 documents with 2,052 entities, 1,976 of which are normalized to Gene Ontology. We \
68
+ divided the corpus into training, development, and test sets.
69
+ """
70
+
71
+ _HOMEPAGE = "https://zenodo.org/records/11263147"
72
+
73
+ _LICENSE = "CC_BY_4p0"
74
+
75
+ _URLS = {
76
+ _DATASETNAME: "https://zenodo.org/records/11263147/files/CoNECo_corpus.tar.gz?download=1",
77
+ }
78
+
79
+ _SUPPORTED_TASKS = [
80
+ Tasks.NAMED_ENTITY_RECOGNITION,
81
+ Tasks.NAMED_ENTITY_DISAMBIGUATION,
82
+ ]
83
+
84
+ _SOURCE_VERSION = "2.0.0"
85
+
86
+ _BIGBIO_VERSION = "1.0.0"
87
+
88
+
89
+ class ConecoDataset(datasets.GeneratorBasedBuilder):
90
+ """TODO: Short description of my dataset."""
91
+
92
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
93
+ BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
94
+
95
+ BUILDER_CONFIGS = [
96
+ BigBioConfig(
97
+ name="coneco_source",
98
+ version=SOURCE_VERSION,
99
+ description="coneco source schema",
100
+ schema="source",
101
+ subset_id="coneco",
102
+ ),
103
+ BigBioConfig(
104
+ name="coneco_bigbio_kb",
105
+ version=BIGBIO_VERSION,
106
+ description="coneco BigBio schema",
107
+ schema="bigbio_kb",
108
+ subset_id="coneco",
109
+ ),
110
+ ]
111
+
112
+ DEFAULT_CONFIG_NAME = "coneco_source"
113
+
114
+ def _info(self) -> datasets.DatasetInfo:
115
+ if self.config.schema == "source":
116
+ features = datasets.Features(
117
+ {
118
+ "id": datasets.Value("string"),
119
+ "document_id": datasets.Value("string"),
120
+ "text": datasets.Value("string"),
121
+ "text_bound_annotations": [ # T line in brat, i.e. entities for NER task
122
+ {
123
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
124
+ "text": datasets.Sequence(datasets.Value("string")),
125
+ "type": datasets.Value("string"),
126
+ "id": datasets.Value("string"),
127
+ }
128
+ ],
129
+ "normalizations": [ # N lines in brat, i.e. normalization for NEN task
130
+ {
131
+ "id": datasets.Value("string"),
132
+ "type": datasets.Value("string"),
133
+ "ref_id": datasets.Value("string"),
134
+ "resource_name": datasets.Value("string"),
135
+ "cuid": datasets.Value("string"),
136
+ "text": datasets.Value("string"),
137
+ }
138
+ ],
139
+ }
140
+ )
141
+ elif self.config.schema == "bigbio_kb":
142
+ features = kb_features
143
+
144
+ return datasets.DatasetInfo(
145
+ description=_DESCRIPTION,
146
+ features=features,
147
+ homepage=_HOMEPAGE,
148
+ license=str(_LICENSE),
149
+ citation=_CITATION,
150
+ )
151
+
152
+ def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
153
+ """Returns SplitGenerators."""
154
+
155
+ urls = _URLS[_DATASETNAME]
156
+ data_dir = Path(dl_manager.download_and_extract(urls))
157
+
158
+ return [
159
+ datasets.SplitGenerator(
160
+ name=datasets.Split.TRAIN,
161
+ # Whatever you put in gen_kwargs will be passed to _generate_examples
162
+ gen_kwargs={
163
+ "filepath": data_dir / "train",
164
+ "split": "train",
165
+ },
166
+ ),
167
+ datasets.SplitGenerator(
168
+ name=datasets.Split.TEST,
169
+ gen_kwargs={
170
+ "filepath": data_dir / "test",
171
+ "split": "test",
172
+ },
173
+ ),
174
+ datasets.SplitGenerator(
175
+ name=datasets.Split.VALIDATION,
176
+ gen_kwargs={
177
+ "filepath": data_dir / "dev",
178
+ "split": "dev",
179
+ },
180
+ ),
181
+ ]
182
+
183
+ def _filter_oos_entities(self, brat_parse):
184
+ """Filter out entity annotations with out-of-scope type."""
185
+ brat_parse["text_bound_annotations"] = [a for a in brat_parse["text_bound_annotations"] if a["type"] != "OOS"]
186
+ return brat_parse
187
+
188
+ def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
189
+ """Yields examples as (key, example) tuples."""
190
+ if self.config.schema == "source":
191
+ for file in sorted(filepath.iterdir()):
192
+ if file.suffix != ".txt":
193
+ continue
194
+ brat_parsed = parse_brat_file(file)
195
+ brat_parsed = self._filter_oos_entities(brat_parsed)
196
+ brat_parsed["id"] = file.stem
197
+
198
+ yield brat_parsed["document_id"], brat_parsed
199
+
200
+ elif self.config.schema == "bigbio_kb":
201
+ for file in sorted(filepath.iterdir()):
202
+ if file.suffix != ".txt":
203
+ continue
204
+ brat_parsed = parse_brat_file(file)
205
+ brat_parsed = self._filter_oos_entities(brat_parsed)
206
+ bigbio_kb_example = brat_parse_to_bigbio_kb(brat_parsed)
207
+ bigbio_kb_example["id"] = file.stem
208
+
209
+ yield bigbio_kb_example["id"], bigbio_kb_example
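
A sketch of exercising the two configs above directly from the local loading script, assuming `coneco.py` and `bigbiohub.py` sit in the current working directory:

```python
from datasets import load_dataset

# Both configs share the brat parsing; they differ only in the output schema.
source = load_dataset("coneco.py", name="coneco_source", split="train", trust_remote_code=True)
kb = load_dataset("coneco.py", name="coneco_bigbio_kb", split="train", trust_remote_code=True)

# The source schema keeps raw brat fields (T and N lines) ...
print(source[0]["text_bound_annotations"][0])
# ... while bigbio_kb exposes harmonized entities with their GO normalizations.
print(kb[0]["entities"][0])
```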