Sub-tasks: parsing
Languages: English
Multilinguality: monolingual
Size Categories: unknown
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
dfki-nlp committed on
Commit 8d2e942
1 Parent(s): 2d0d031

Upload brat.py

Files changed (1)
  1. brat.py +337 -0
brat.py ADDED
@@ -0,0 +1,337 @@
import glob
import logging
from dataclasses import dataclass
from os import listdir, path
from typing import Dict, List, Optional

import datasets
from datasets import BuilderConfig, DatasetInfo, Features, Sequence, SplitGenerator, Value

logger = logging.getLogger(__name__)

@dataclass
class BratConfig(BuilderConfig):
    """BuilderConfig for BRAT."""

    url: str = None  # type: ignore
    description: Optional[str] = None
    citation: Optional[str] = None
    homepage: Optional[str] = None

    subdirectory_mapping: Optional[Dict[str, str]] = None
    file_name_blacklist: Optional[List[str]] = None
    ann_file_extension: str = "ann"
    txt_file_extension: str = "txt"


class Brat(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = BratConfig

    def _info(self):
        return DatasetInfo(
            description=self.config.description,
            citation=self.config.citation,
            homepage=self.config.homepage,
            features=Features(
                {
                    "context": Value("string"),
                    "file_name": Value("string"),
                    "spans": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "locations": Sequence(
                                {
                                    "start": Value("int32"),
                                    "end": Value("int32"),
                                }
                            ),
                            "text": Value("string"),
                        }
                    ),
                    "relations": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "arguments": Sequence(
                                {"type": Value("string"), "target": Value("string")}
                            ),
                        }
                    ),
                    "equivalence_relations": Sequence(
                        {
                            "type": Value("string"),
                            "targets": Sequence(Value("string")),
                        }
                    ),
                    "events": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "trigger": Value("string"),
                            "arguments": Sequence(
                                {"type": Value("string"), "target": Value("string")}
                            ),
                        }
                    ),
                    "attributions": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "target": Value("string"),
                            "value": Value("string"),
                        }
                    ),
                    "normalizations": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "target": Value("string"),
                            "resource_id": Value("string"),
                            "entity_id": Value("string"),
                        }
                    ),
                    "notes": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "target": Value("string"),
                            "note": Value("string"),
                        }
                    ),
                }
            ),
        )

    @staticmethod
    def _get_location(location_string):
        parts = location_string.split(" ")
        assert (
            len(parts) == 2
        ), f"Wrong number of entries in location string. Expected 2, but found: {parts}"
        return {"start": int(parts[0]), "end": int(parts[1])}

    @staticmethod
    def _get_span_annotation(annotation_line):
        """
        example input:
        T1 Organization 0 4 Sony
        """

        _id, remaining, text = annotation_line.split("\t", maxsplit=2)
        _type, locations = remaining.split(" ", maxsplit=1)
        return {
            "id": _id,
            "text": text,
            "type": _type,
            "locations": [Brat._get_location(loc) for loc in locations.split(";")],
        }

    @staticmethod
    def _get_event_annotation(annotation_line):
        """
        example input:
        E1 MERGE-ORG:T2 Org1:T1 Org2:T3
        """
        _id, remaining = annotation_line.strip().split("\t")
        args = [dict(zip(["type", "target"], a.split(":"))) for a in remaining.split(" ")]
        return {
            "id": _id,
            "type": args[0]["type"],
            "trigger": args[0]["target"],
            "arguments": args[1:],
        }

    @staticmethod
    def _get_relation_annotation(annotation_line):
        """
        example input:
        R1 Origin Arg1:T3 Arg2:T4
        """

        _id, remaining = annotation_line.strip().split("\t")
        _type, remaining = remaining.split(" ", maxsplit=1)
        args = [dict(zip(["type", "target"], a.split(":"))) for a in remaining.split(" ")]
        return {"id": _id, "type": _type, "arguments": args}

    @staticmethod
    def _get_equivalence_relation_annotation(annotation_line):
        """
        example input:
        * Equiv T1 T2 T3
        """
        _, remaining = annotation_line.strip().split("\t")
        parts = remaining.split(" ")
        return {"type": parts[0], "targets": parts[1:]}

    @staticmethod
    def _get_attribute_annotation(annotation_line):
        """
        example input (binary attribute; its presence implies the value true, absence implies false):
        A1 Negation E1
        example input (multi-valued attribute with an explicit value):
        A2 Confidence E2 L1
        """

        _id, remaining = annotation_line.strip().split("\t")
        parts = remaining.split(" ")
        # if no value is present, it is implicitly "true"
        if len(parts) == 2:
            parts.append("true")
        return {
            "id": _id,
            "type": parts[0],
            "target": parts[1],
            "value": parts[2],
        }

    @staticmethod
    def _get_normalization_annotation(annotation_line):
        """
        example input:
        N1 Reference T1 Wikipedia:534366 Barack Obama
        """
        _id, remaining, text = annotation_line.split("\t", maxsplit=2)
        _type, target, ref = remaining.split(" ")
        res_id, ent_id = ref.split(":")
        return {
            "id": _id,
            "type": _type,
            "target": target,
            "resource_id": res_id,
            "entity_id": ent_id,
        }

    @staticmethod
    def _get_note_annotation(annotation_line):
        """
        example input:
        #1 AnnotatorNotes T1 this annotation is suspect
        """
        _id, remaining, note = annotation_line.split("\t", maxsplit=2)
        _type, target = remaining.split(" ")
        return {
            "id": _id,
            "type": _type,
            "target": target,
            "note": note,
        }

    @staticmethod
    def _read_annotation_file(filename):
        """
        reads a BRAT v1.3 annotations file (see https://brat.nlplab.org/standoff.html)
        """

        res = {
            "spans": [],
            "events": [],
            "relations": [],
            "equivalence_relations": [],
            "attributions": [],
            "normalizations": [],
            "notes": [],
        }

        with open(filename) as file:
            for i, line in enumerate(file):
                if len(line.strip()) == 0:
                    continue
                ann_type = line[0]

                # strip away the new line character
                if line.endswith("\n"):
                    line = line[:-1]

                if ann_type == "T":
                    res["spans"].append(Brat._get_span_annotation(line))
                elif ann_type == "E":
                    res["events"].append(Brat._get_event_annotation(line))
                elif ann_type == "R":
                    res["relations"].append(Brat._get_relation_annotation(line))
                elif ann_type == "*":
                    res["equivalence_relations"].append(
                        Brat._get_equivalence_relation_annotation(line)
                    )
                elif ann_type in ["A", "M"]:
                    res["attributions"].append(Brat._get_attribute_annotation(line))
                elif ann_type == "N":
                    res["normalizations"].append(Brat._get_normalization_annotation(line))
                elif ann_type == "#":
                    res["notes"].append(Brat._get_note_annotation(line))
                else:
                    raise ValueError(
                        f'unknown BRAT annotation id type: "{line}" (from file {filename} @line {i}). '
                        f"Annotation ids have to start with T (spans), E (events), R (relations), "
                        f"* (equivalence relations), A or M (attributions), N (normalizations), "
                        f"or # (notes). See https://brat.nlplab.org/standoff.html for the BRAT "
                        f"annotation file specification."
                    )
        return res
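
    # Illustration (derived from the parsing logic above; the annotation content is
    # made up): for an .ann file containing the two lines
    #     T1<TAB>Organization 0 4<TAB>Sony
    #     #1<TAB>AnnotatorNotes T1<TAB>manually checked
    # _read_annotation_file returns
    #     {"spans": [{"id": "T1", "text": "Sony", "type": "Organization",
    #                 "locations": [{"start": 0, "end": 4}]}],
    #      "notes": [{"id": "#1", "type": "AnnotatorNotes", "target": "T1",
    #                 "note": "manually checked"}]}
    # with all remaining keys mapping to empty lists.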

    def _generate_examples(self, files=None, directory=None):
        """Read context (.txt) and annotation (.ann) files."""
        if files is None:
            assert (
                directory is not None
            ), "If files is None, directory has to be provided, but it is also None."
            _files = glob.glob(f"{directory}/*.{self.config.ann_file_extension}")
            files = sorted(path.splitext(fn)[0] for fn in _files)

        for filename in files:
            basename = path.basename(filename)
            if (
                self.config.file_name_blacklist is not None
                and basename in self.config.file_name_blacklist
            ):
                logger.info(f"skip annotation file: {basename} (blacklisted)")
                continue

            ann_fn = f"{filename}.{self.config.ann_file_extension}"
            brat_annotations = Brat._read_annotation_file(ann_fn)

            txt_fn = f"{filename}.{self.config.txt_file_extension}"
            # read the raw document text and close the file handle right away
            with open(txt_fn) as txt_file:
                txt_content = txt_file.read()
            brat_annotations["context"] = txt_content
            brat_annotations["file_name"] = basename

            yield basename, brat_annotations

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        subdirectory_mapping = self.config.subdirectory_mapping

        # since subclasses of BuilderConfig are not allowed to define
        # attributes without defaults, check here
        assert self.config.url is not None, "data url not specified"

        # if url points to a local directory, just use that
        if path.exists(self.config.url) and path.isdir(self.config.url):
            data_dir = self.config.url
        # otherwise, download and extract
        else:
            data_dir = dl_manager.download_and_extract(self.config.url)
            logger.info(f"load from data dir: {data_dir}")

        # if no subdirectory mapping is provided, ...
        if subdirectory_mapping is None:
            # ... use available subdirectories as split names ...
            subdirs = [f for f in listdir(data_dir) if path.isdir(path.join(data_dir, f))]
            if len(subdirs) > 0:
                subdirectory_mapping = {subdir: subdir for subdir in subdirs}
            else:
                # ... otherwise, default to a single train split over the base directory
                subdirectory_mapping = {"": "train"}

        return [
            SplitGenerator(
                name=split,
                # these kwargs will be passed to _generate_examples
                gen_kwargs={
                    "directory": path.join(data_dir, subdir),
                },
            )
            for subdir, split in subdirectory_mapping.items()
        ]
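
Usage sketch (not part of the committed file): assuming brat.py is used as a local
loading script, the BratConfig fields can be passed to datasets.load_dataset as config
keyword arguments; the script path, config name, URL, and split mapping below are
placeholders.

import datasets

dataset = datasets.load_dataset(
    "path/to/brat.py",  # hypothetical local path to this loading script
    name="my_brat_corpus",  # hypothetical config name
    url="https://example.org/my_corpus.zip",  # placeholder archive with .ann/.txt pairs
    subdirectory_mapping={"train": "train", "dev": "validation"},
)

# each example holds the document text plus the parsed standoff annotations
print(dataset["train"][0]["file_name"])
print(dataset["train"][0]["spans"])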