masaenger committed on
Commit 651a49b
1 Parent(s): 32e7508

Update anat_em based on git version 06790f1

Files changed (3)
  1. README.md +47 -0
  2. anat_em.py +230 -0
  3. bigbiohub.py +592 -0
README.md ADDED
@@ -0,0 +1,47 @@
+
+ ---
+ language:
+ - en
+ bigbio_language:
+ - English
+ license: cc-by-sa-3.0
+ multilinguality: monolingual
+ bigbio_license_shortname: CC_BY_SA_3p0
+ pretty_name: AnatEM
+ homepage: http://nactem.ac.uk/anatomytagger/#AnatEM
+ bigbio_pubmed: True
+ bigbio_public: True
+ bigbio_tasks:
+ - NAMED_ENTITY_RECOGNITION
+ ---
+
+
+ # Dataset Card for AnatEM
+
+ ## Dataset Description
+
+ - **Homepage:** http://nactem.ac.uk/anatomytagger/#AnatEM
+ - **Pubmed:** True
+ - **Public:** True
+ - **Tasks:** NER
+
+
+ The extended Anatomical Entity Mention corpus (AnatEM) consists of 1212 documents (approx. 250,000 words) manually annotated to identify over 13,000 mentions of anatomical entities. Each annotation is assigned one of 12 granularity-based types such as Cellular component, Tissue and Organ, defined with reference to the Common Anatomy Reference Ontology.
+
+
+
+ ## Citation Information
+
+ ```
+ @article{pyysalo2014anatomical,
+   title={Anatomical entity mention recognition at literature scale},
+   author={Pyysalo, Sampo and Ananiadou, Sophia},
+   journal={Bioinformatics},
+   volume={30},
+   number={6},
+   pages={868--875},
+   year={2014},
+   publisher={Oxford University Press}
+ }
+
+ ```
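For reference, a minimal usage sketch (not part of this commit): it loads the script added below with either of its two configs. The local path `anat_em.py` is an assumption (you could instead point `load_dataset` at the Hub repo hosting this script), and newer `datasets` releases may also require `trust_remote_code=True`.

```python
# Minimal sketch: load the AnatEM loader added in this commit.
# Assumes anat_em.py and bigbiohub.py are in the working directory.
import datasets

# Original brat-derived ("source") schema: document text plus entity annotations.
source = datasets.load_dataset("anat_em.py", name="anat_em_source")

# Harmonized BigBio KB schema used for NER (passages + entities).
bigbio = datasets.load_dataset("anat_em.py", name="anat_em_bigbio_kb")

print(source["train"][0]["document_id"])
print(bigbio["validation"][0]["entities"][:2])
```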
anat_em.py ADDED
@@ -0,0 +1,230 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ The extended Anatomical Entity Mention corpus (AnatEM) consists of 1212 documents
+ (approx. 250,000 words) manually annotated to identify over 13,000 mentions of anatomical
+ entities. Each annotation is assigned one of 12 granularity-based types such as Cellular
+ component, Tissue and Organ, defined with reference to the Common Anatomy Reference Ontology
+ (see https://bioportal.bioontology.org/ontologies/CARO).
+ """
+ from pathlib import Path
+ from typing import Dict, Iterator, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ from .bigbiohub import parse_brat_file
+ from .bigbiohub import brat_parse_to_bigbio_kb
+
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{pyysalo2014anatomical,
+   title={Anatomical entity mention recognition at literature scale},
+   author={Pyysalo, Sampo and Ananiadou, Sophia},
+   journal={Bioinformatics},
+   volume={30},
+   number={6},
+   pages={868--875},
+   year={2014},
+   publisher={Oxford University Press}
+ }
+ """
+
+ _DATASETNAME = "anat_em"
+ _DISPLAYNAME = "AnatEM"
+
+
+ _DESCRIPTION = """\
+ The extended Anatomical Entity Mention corpus (AnatEM) consists of 1212 \
+ documents (approx. 250,000 words) manually annotated to identify over 13,000 \
+ mentions of anatomical entities. Each annotation is assigned one of 12 \
+ granularity-based types such as Cellular component, Tissue and Organ, defined \
+ with reference to the Common Anatomy Reference Ontology.
+ """
+
+ _HOMEPAGE = "http://nactem.ac.uk/anatomytagger/#AnatEM"
+
+ _LICENSE = 'Creative Commons Attribution Share Alike 3.0 Unported'
+
+ _URLS = {_DATASETNAME: "http://nactem.ac.uk/anatomytagger/AnatEM-1.0.2.tar.gz"}
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.2"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class AnatEMDataset(datasets.GeneratorBasedBuilder):
+     """The extended Anatomical Entity Mention corpus (AnatEM)"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="anat_em_source",
+             version=SOURCE_VERSION,
+             description="AnatEM source schema",
+             schema="source",
+             subset_id="anat_em",
+         ),
+         BigBioConfig(
+             name="anat_em_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="AnatEM BigBio schema",
+             schema="bigbio_kb",
+             subset_id="anat_em",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "anat_em_source"
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "document_type": datasets.Value("string"),  # Either PMC or PM
+                     "text": datasets.Value("string"),
+                     "text_type": datasets.Value(
+                         "string"
+                     ),  # Either abstract (for PM) or sec / caption (for PMC)
+                     "entities": [
+                         {
+                             "entity_id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS[_DATASETNAME]
+         data_dir = Path(dl_manager.download_and_extract(urls))
+
+         standoff_dir = data_dir / "AnatEM-1.0.2" / "standoff"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"split_dir": standoff_dir / "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"split_dir": standoff_dir / "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"split_dir": standoff_dir / "devel"},
+             ),
+         ]
+
+     def _generate_examples(self, split_dir: Path) -> Iterator[Tuple[str, Dict]]:
+         if self.config.name == "anat_em_source":
+             for file in split_dir.iterdir():
+                 # Ignore hidden files and annotation files - we just consider the brat text files
+                 if file.name.startswith("._") or file.name.endswith(".ann"):
+                     continue
+
+                 # Read brat annotations for the given text file and convert example to the source format
+                 brat_example = parse_brat_file(file)
+                 source_example = self._to_source_example(file, brat_example)
+
+                 yield source_example["document_id"], source_example
+
+         elif self.config.name == "anat_em_bigbio_kb":
+             for file in split_dir.iterdir():
+                 # Ignore hidden files and annotation files - we just consider the brat text files
+                 if file.name.startswith("._") or file.name.endswith(".ann"):
+                     continue
+
+                 # Read brat annotations for the given text file and convert example to the BigBio-KB format
+                 brat_example = parse_brat_file(file)
+                 kb_example = brat_parse_to_bigbio_kb(brat_example)
+                 kb_example["id"] = kb_example["document_id"]
+
+                 # Fix text type annotation for the converted example
+                 _, text_type = self.get_document_type_and_text_type(file)
+                 kb_example["passages"][0]["type"] = text_type
+
+                 yield kb_example["id"], kb_example
+
+     def _to_source_example(self, input_file: Path, brat_example: Dict) -> Dict:
+         """
+         Converts an example extracted using the default brat parsing logic to the source format
+         of the given corpus.
+         """
+         document_type, text_type = self.get_document_type_and_text_type(input_file)
+
+         source_example = {
+             "document_id": brat_example["document_id"],
+             "document_type": document_type,
+             "text": brat_example["text"],
+             "text_type": text_type,
+         }
+
+         id_prefix = brat_example["document_id"] + "_"
+
+         source_example["entities"] = []
+         for entity_annotation in brat_example["text_bound_annotations"]:
+             entity_ann = entity_annotation.copy()
+
+             entity_ann["entity_id"] = id_prefix + entity_ann["id"]
+             entity_ann.pop("id")
+
+             source_example["entities"].append(entity_ann)
+
+         return source_example
+
+     def get_document_type_and_text_type(self, input_file: Path) -> Tuple[str, str]:
+         """
+         Extracts the document type (PubMed (PM) or PubMed Central (PMC)) and the respective
+         text type (abstract for PM and sec or caption for PMC) from the name of the given
+         file, e.g.:
+
+         PMID-9778569.txt -> ("PM", "abstract")
+
+         PMC-1274342-sec-02.txt -> ("PMC", "sec")
+
+         PMC-1592597-caption-02.ann -> ("PMC", "caption")
+
+         """
+         name_parts = str(input_file.stem).split("-")
+
+         if name_parts[0] == "PMID":
+             return "PM", "abstract"
+
+         elif name_parts[0] == "PMC":
+             return "PMC", name_parts[2]
+         else:
+             raise AssertionError(f"Unexpected file prefix {name_parts[0]}")
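To make the file-name convention above concrete, here is a small standalone sketch of the same mapping implemented by `get_document_type_and_text_type` (rewritten as a free function purely for illustration):

```python
# Standalone sketch of the PM / PMC file-name convention used by the loader.
from pathlib import Path
from typing import Tuple


def document_and_text_type(input_file: Path) -> Tuple[str, str]:
    name_parts = input_file.stem.split("-")
    if name_parts[0] == "PMID":  # PubMed abstract
        return "PM", "abstract"
    if name_parts[0] == "PMC":  # PubMed Central section or figure caption
        return "PMC", name_parts[2]
    raise AssertionError(f"Unexpected file prefix {name_parts[0]}")


for name in ["PMID-9778569.txt", "PMC-1274342-sec-02.txt", "PMC-1592597-caption-02.ann"]:
    print(name, "->", document_and_text_type(Path(name)))
# PMID-9778569.txt -> ('PM', 'abstract')
# PMC-1274342-sec-02.txt -> ('PMC', 'sec')
# PMC-1592597-caption-02.ann -> ('PMC', 'caption')
```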
bigbiohub.py ADDED
@@ -0,0 +1,592 @@
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from enum import Enum
+ import logging
+ from pathlib import Path
+ from types import SimpleNamespace
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
+
+ import datasets
+
+ if TYPE_CHECKING:
+     import bioc
+
+ logger = logging.getLogger(__name__)
+
+
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
+
+
+ @dataclass
+ class BigBioConfig(datasets.BuilderConfig):
+     """BuilderConfig for BigBio."""
+
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+
+
+ class Tasks(Enum):
+     NAMED_ENTITY_RECOGNITION = "NER"
+     NAMED_ENTITY_DISAMBIGUATION = "NED"
+     EVENT_EXTRACTION = "EE"
+     RELATION_EXTRACTION = "RE"
+     COREFERENCE_RESOLUTION = "COREF"
+     QUESTION_ANSWERING = "QA"
+     TEXTUAL_ENTAILMENT = "TE"
+     SEMANTIC_SIMILARITY = "STS"
+     TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
+     PARAPHRASING = "PARA"
+     TRANSLATION = "TRANSL"
+     SUMMARIZATION = "SUM"
+     TEXT_CLASSIFICATION = "TXTCLASS"
+
+
+ entailment_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "premise": datasets.Value("string"),
+         "hypothesis": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ pairs_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ qa_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "question_id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "question": datasets.Value("string"),
+         "type": datasets.Value("string"),
+         "choices": [datasets.Value("string")],
+         "context": datasets.Value("string"),
+         "answer": datasets.Sequence(datasets.Value("string")),
+     }
+ )
+
+ text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text": datasets.Value("string"),
+         "labels": [datasets.Value("string")],
+     }
+ )
+
+ text2text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "text_1_name": datasets.Value("string"),
+         "text_2_name": datasets.Value("string"),
+     }
+ )
+
+ kb_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "passages": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+             }
+         ],
+         "entities": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "events": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 # refers to the text_bound_annotation of the trigger
+                 "trigger": {
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 },
+                 "arguments": [
+                     {
+                         "role": datasets.Value("string"),
+                         "ref_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "coreferences": [
+             {
+                 "id": datasets.Value("string"),
+                 "entity_ids": datasets.Sequence(datasets.Value("string")),
+             }
+         ],
+         "relations": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "arg1_id": datasets.Value("string"),
+                 "arg2_id": datasets.Value("string"),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+     }
+ )
+
+
+ TASK_TO_SCHEMA = {
+     Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
+     Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
+     Tasks.EVENT_EXTRACTION.name: "KB",
+     Tasks.RELATION_EXTRACTION.name: "KB",
+     Tasks.COREFERENCE_RESOLUTION.name: "KB",
+     Tasks.QUESTION_ANSWERING.name: "QA",
+     Tasks.TEXTUAL_ENTAILMENT.name: "TE",
+     Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
+     Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
+     Tasks.PARAPHRASING.name: "T2T",
+     Tasks.TRANSLATION.name: "T2T",
+     Tasks.SUMMARIZATION.name: "T2T",
+     Tasks.TEXT_CLASSIFICATION.name: "TEXT",
+ }
+
+ SCHEMA_TO_TASKS = defaultdict(set)
+ for task, schema in TASK_TO_SCHEMA.items():
+     SCHEMA_TO_TASKS[schema].add(task)
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
+
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
+
+ SCHEMA_TO_FEATURES = {
+     "KB": kb_features,
+     "QA": qa_features,
+     "TE": entailment_features,
+     "T2T": text2text_features,
+     "TEXT": text_features,
+     "PAIRS": pairs_features,
+ }
+
+
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
+
+     offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+
+     text = ann.text
+
+     if len(offsets) > 1:
+         i = 0
+         texts = []
+         for start, end in offsets:
+             chunk_len = end - start
+             texts.append(text[i : chunk_len + i])
+             i += chunk_len
+             while i < len(text) and text[i] == " ":
+                 i += 1
+     else:
+         texts = [text]
+
+     return offsets, texts
+
+
+ def remove_prefix(a: str, prefix: str) -> str:
+     if a.startswith(prefix):
+         a = a[len(prefix) :]
+     return a
+
+
+ def parse_brat_file(
+     txt_file: Path,
+     annotation_file_suffixes: List[str] = None,
+     parse_notes: bool = False,
+ ) -> Dict:
+     """
+     Parse a brat file into the schema defined below.
+     `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
+     Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
+     e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
+     Will include annotator notes, when `parse_notes == True`.
+     brat_features = datasets.Features(
+         {
+             "id": datasets.Value("string"),
+             "document_id": datasets.Value("string"),
+             "text": datasets.Value("string"),
+             "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                 {
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "type": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ],
+             "events": [  # E line in brat
+                 {
+                     "trigger": datasets.Value(
+                         "string"
+                     ),  # refers to the text_bound_annotation of the trigger,
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "arguments": datasets.Sequence(
+                         {
+                             "role": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             ],
+             "relations": [  # R line in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "head": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "tail": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "type": datasets.Value("string"),
+                 }
+             ],
+             "equivalences": [  # Equiv line in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "ref_ids": datasets.Sequence(datasets.Value("string")),
+                 }
+             ],
+             "attributes": [  # M or A lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "value": datasets.Value("string"),
+                 }
+             ],
+             "normalizations": [  # N lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "resource_name": datasets.Value(
+                         "string"
+                     ),  # Name of the resource, e.g. "Wikipedia"
+                     "cuid": datasets.Value(
+                         "string"
+                     ),  # ID in the resource, e.g. 534366
+                     "text": datasets.Value(
+                         "string"
+                     ),  # Human readable description/name of the entity, e.g. "Barack Obama"
+                 }
+             ],
+             ### OPTIONAL: Only included when `parse_notes == True`
+             "notes": [  # # lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ],
+         },
+     )
+     """
+
+     example = {}
+     example["document_id"] = txt_file.with_suffix("").name
+     with txt_file.open() as f:
+         example["text"] = f.read()
+
+     # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
+     # for event extraction
+     if annotation_file_suffixes is None:
+         annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+     if len(annotation_file_suffixes) == 0:
+         raise AssertionError(
+             "At least one suffix for the to-be-read annotation files should be given!"
+         )
+
+     ann_lines = []
+     for suffix in annotation_file_suffixes:
+         annotation_file = txt_file.with_suffix(suffix)
+         try:
+             with annotation_file.open() as f:
+                 ann_lines.extend(f.readlines())
+         except Exception:
+             continue
+
+     example["text_bound_annotations"] = []
+     example["events"] = []
+     example["relations"] = []
+     example["equivalences"] = []
+     example["attributes"] = []
+     example["normalizations"] = []
+
+     if parse_notes:
+         example["notes"] = []
+
+     for line in ann_lines:
+         line = line.strip()
+         if not line:
+             continue
+
+         if line.startswith("T"):  # Text bound
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+             ann["offsets"] = []
+             span_str = remove_prefix(fields[1], (ann["type"] + " "))
+             text = fields[2]
+             for span in span_str.split(";"):
+                 start, end = span.split()
+                 ann["offsets"].append([int(start), int(end)])
+
+             # Heuristically split text of discontiguous entities into chunks
+             ann["text"] = []
+             if len(ann["offsets"]) > 1:
+                 i = 0
+                 for start, end in ann["offsets"]:
+                     chunk_len = end - start
+                     ann["text"].append(text[i : chunk_len + i])
+                     i += chunk_len
+                     while i < len(text) and text[i] == " ":
+                         i += 1
+             else:
+                 ann["text"] = [text]
+
+             example["text_bound_annotations"].append(ann)
+
+         elif line.startswith("E"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+
+             ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+             ann["arguments"] = []
+             for role_ref_id in fields[1].split()[1:]:
+                 argument = {
+                     "role": (role_ref_id.split(":"))[0],
+                     "ref_id": (role_ref_id.split(":"))[1],
+                 }
+                 ann["arguments"].append(argument)
+
+             example["events"].append(ann)
+
+         elif line.startswith("R"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+
+             ann["head"] = {
+                 "role": fields[1].split()[1].split(":")[0],
+                 "ref_id": fields[1].split()[1].split(":")[1],
+             }
+             ann["tail"] = {
+                 "role": fields[1].split()[2].split(":")[0],
+                 "ref_id": fields[1].split()[2].split(":")[1],
+             }
+
+             example["relations"].append(ann)
+
+         # '*' seems to be the legacy way to mark equivalences,
+         # but I couldn't find any info on the current way
+         # this might have to be adapted dependent on the brat version
+         # of the annotation
+         elif line.startswith("*"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["ref_ids"] = fields[1].split()[1:]
+
+             example["equivalences"].append(ann)
+
+         elif line.startswith("A") or line.startswith("M"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+
+             info = fields[1].split()
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+
+             if len(info) > 2:
+                 ann["value"] = info[2]
+             else:
+                 ann["value"] = ""
+
+             example["attributes"].append(ann)
+
+         elif line.startswith("N"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["text"] = fields[2]
+
+             info = fields[1].split()
+
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             ann["resource_name"] = info[2].split(":")[0]
+             ann["cuid"] = info[2].split(":")[1]
+             example["normalizations"].append(ann)
+
+         elif parse_notes and line.startswith("#"):
+             ann = {}
+             fields = line.split("\t")
+
+             ann["id"] = fields[0]
+             ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+
+             info = fields[1].split()
+
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             example["notes"].append(ann)
+
+     return example
+
+
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+     """
+     Transform a brat parse (conforming to the standard brat schema) obtained with
+     `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
+     :param brat_parse:
+     """
+
+     unified_example = {}
+
+     # Prefix all ids with document id to ensure global uniqueness,
+     # because brat ids are only unique within their document
+     id_prefix = brat_parse["document_id"] + "_"
+
+     # identical
+     unified_example["document_id"] = brat_parse["document_id"]
+     unified_example["passages"] = [
+         {
+             "id": id_prefix + "_text",
+             "type": "abstract",
+             "text": [brat_parse["text"]],
+             "offsets": [[0, len(brat_parse["text"])]],
+         }
+     ]
+
+     # get normalizations
+     ref_id_to_normalizations = defaultdict(list)
+     for normalization in brat_parse["normalizations"]:
+         ref_id_to_normalizations[normalization["ref_id"]].append(
+             {
+                 "db_name": normalization["resource_name"],
+                 "db_id": normalization["cuid"],
+             }
+         )
+
+     # separate entities and event triggers
+     unified_example["events"] = []
+     non_event_ann = brat_parse["text_bound_annotations"].copy()
+     for event in brat_parse["events"]:
+         event = event.copy()
+         event["id"] = id_prefix + event["id"]
+         trigger = next(
+             tr
+             for tr in brat_parse["text_bound_annotations"]
+             if tr["id"] == event["trigger"]
+         )
+         if trigger in non_event_ann:
+             non_event_ann.remove(trigger)
+         event["trigger"] = {
+             "text": trigger["text"].copy(),
+             "offsets": trigger["offsets"].copy(),
+         }
+         for argument in event["arguments"]:
+             argument["ref_id"] = id_prefix + argument["ref_id"]
+
+         unified_example["events"].append(event)
+
+     unified_example["entities"] = []
+     anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+     for ann in non_event_ann:
+         entity_ann = ann.copy()
+         entity_ann["id"] = id_prefix + entity_ann["id"]
+         entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+         unified_example["entities"].append(entity_ann)
+
+     # massage relations
+     unified_example["relations"] = []
+     skipped_relations = set()
+     for ann in brat_parse["relations"]:
+         if (
+             ann["head"]["ref_id"] not in anno_ids
+             or ann["tail"]["ref_id"] not in anno_ids
+         ):
+             skipped_relations.add(ann["id"])
+             continue
+         unified_example["relations"].append(
+             {
+                 "arg1_id": id_prefix + ann["head"]["ref_id"],
+                 "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                 "id": id_prefix + ann["id"],
+                 "type": ann["type"],
+                 "normalized": [],
+             }
+         )
+     if len(skipped_relations) > 0:
+         example_id = brat_parse["document_id"]
+         logger.info(
+             f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+             f" Skip (for now): "
+             f"{list(skipped_relations)}"
+         )
+
+     # get coreferences
+     unified_example["coreferences"] = []
+     for i, ann in enumerate(brat_parse["equivalences"], start=1):
+         is_entity_cluster = True
+         for ref_id in ann["ref_ids"]:
+             if not ref_id.startswith("T"):  # not textbound -> no entity
+                 is_entity_cluster = False
+             elif ref_id not in anno_ids:  # event trigger -> no entity
+                 is_entity_cluster = False
+         if is_entity_cluster:
+             entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+             unified_example["coreferences"].append(
+                 {"id": id_prefix + str(i), "entity_ids": entity_ids}
+             )
+     return unified_example
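As a worked example of the brat parsing above, the sketch below writes a tiny standoff pair (document text and annotation content are invented for illustration), parses it with `parse_brat_file`, and converts it with `brat_parse_to_bigbio_kb`; it assumes `bigbiohub.py` is importable from the working directory.

```python
# Sketch: parse a minimal brat .txt/.ann pair and convert it to the KB schema.
import tempfile
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

with tempfile.TemporaryDirectory() as tmp:
    txt = Path(tmp) / "PMID-0000000.txt"
    ann = Path(tmp) / "PMID-0000000.ann"
    txt.write_text("Expression was measured in the cell membrane.")
    # One text-bound annotation (T line) and one normalization (N line).
    ann.write_text(
        "T1\tCellular_component 31 44\tcell membrane\n"
        "N1\tReference T1 GO:0005886\tplasma membrane\n"
    )

    brat_example = parse_brat_file(txt)
    kb_example = brat_parse_to_bigbio_kb(brat_example)

    print(brat_example["text_bound_annotations"][0]["offsets"])  # [[31, 44]]
    print(kb_example["entities"][0]["normalized"])  # [{'db_name': 'GO', 'db_id': '0005886'}]
```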