dfki-nlp committed on
Commit
d5a24c5
1 Parent(s): b4568cc

Upload sciarg.py

Browse files
Files changed (1) hide show
  1. sciarg.py +349 -0
sciarg.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import logging
3
+ from dataclasses import dataclass
4
+ from os import listdir, path
5
+ from typing import Dict, List, Optional
6
+
7
+ import datasets
8
+ from datasets import BuilderConfig, DatasetInfo, Features, Sequence, SplitGenerator, Value
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
@dataclass
class BratConfig(BuilderConfig):
    """BuilderConfig for BRAT."""

    # URL of the corpus archive (or a path to an already-extracted local directory).
    url: str = "http://data.dws.informatik.uni-mannheim.de/sci-arg/compiled_corpus.zip"  # type: ignore
    # Human-readable dataset description shown in the dataset card.
    description: Optional[str] = '''This dataset is an extension of the Dr. Inventor corpus (Fisas et al., 2015, 2016) with an annotation layer containing
fine-grained argumentative components and relations. It is the first argument-annotated corpus of scientific
publications (in English), which allows for joint analyses of argumentation and other rhetorical dimensions of
scientific writing.'''
    # BibTeX citation (leading "|" kept for YAML block-scalar rendering in dataset cards).
    citation: Optional[str] = """|
@inproceedings{lauscher2018b,
title = {An argument-annotated corpus of scientific publications},
booktitle = {Proceedings of the 5th Workshop on Mining Argumentation},
publisher = {Association for Computational Linguistics},
author = {Lauscher, Anne and Glava\v{s}, Goran and Ponzetto, Simone Paolo},
address = {Brussels, Belgium},
year = {2018},
pages = {40–46}
}"""
    homepage: Optional[str] = "https://github.com/anlausch/ArguminSci"

    # Optional mapping {subdirectory -> split name}; if None, subdirectories of
    # the data dir are used as split names (or a single "train" split).
    subdirectory_mapping: Optional[Dict[str, str]] = None
    # Base file names (without extension) to skip during example generation.
    file_name_blacklist: Optional[List[str]] = None
    # File extensions of the BRAT annotation and text files.
    ann_file_extension: str = "ann"
    txt_file_extension: str = "txt"
38
+
39
+
40
class Brat(datasets.GeneratorBasedBuilder):
    """Dataset builder for corpora in the BRAT v1.3 standoff format.

    One example is produced per document: the raw text (``context``) plus all
    annotations parsed from the ``.ann`` file sharing the same base name
    (see https://brat.nlplab.org/standoff.html).
    """

    BUILDER_CONFIG_CLASS = BratConfig
    # NOTE(review): this assigns onto the config *class*, so the blacklist is
    # the default for every BratConfig instance. Document "A28" is skipped
    # during generation — presumably due to known annotation problems in the
    # SciArg corpus; confirm against the corpus release notes.
    BUILDER_CONFIG_CLASS.file_name_blacklist = ["A28"]

    def _info(self):
        """Declare the feature schema covering all BRAT annotation types."""
        return DatasetInfo(
            description=self.config.description,
            citation=self.config.citation,
            homepage=self.config.homepage,
            features=Features(
                {
                    # Full document text and the base file name it came from.
                    "context": Value("string"),
                    "file_name": Value("string"),
                    # "T" lines: text-bound annotations (possibly discontinuous).
                    "spans": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "locations": Sequence(
                                {
                                    "start": Value("int32"),
                                    "end": Value("int32"),
                                }
                            ),
                            "text": Value("string"),
                        }
                    ),
                    # "R" lines: binary (or n-ary) relations between annotations.
                    "relations": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "arguments": Sequence(
                                {"type": Value("string"), "target": Value("string")}
                            ),
                        }
                    ),
                    # "*" lines: unordered equivalence sets.
                    "equivalence_relations": Sequence(
                        {
                            "type": Value("string"),
                            "targets": Sequence(Value("string")),
                        }
                    ),
                    # "E" lines: events with a trigger span and typed arguments.
                    "events": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "trigger": Value("string"),
                            "arguments": Sequence(
                                {"type": Value("string"), "target": Value("string")}
                            ),
                        }
                    ),
                    # "A"/"M" lines: attributes; binary ones get value "true".
                    "attributions": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "target": Value("string"),
                            "value": Value("string"),
                        }
                    ),
                    # "N" lines: links into external resources.
                    "normalizations": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "target": Value("string"),
                            "resource_id": Value("string"),
                            "entity_id": Value("string"),
                        }
                    ),
                    # "#" lines: free-text annotator notes.
                    "notes": Sequence(
                        {
                            "id": Value("string"),
                            "type": Value("string"),
                            "target": Value("string"),
                            "note": Value("string"),
                        }
                    ),
                }
            ),
        )

    @staticmethod
    def _get_location(location_string):
        """Parse a single "start end" offset pair into a dict of ints."""
        parts = location_string.split(" ")
        assert (
            len(parts) == 2
        ), f"Wrong number of entries in location string. Expected 2, but found: {parts}"
        return {"start": int(parts[0]), "end": int(parts[1])}

    @staticmethod
    def _get_span_annotation(annotation_line):
        """Parse a text-bound ("T") annotation line.

        example input:
        T1	Organization 0 4	Sony
        """
        # Discontinuous spans separate their fragments with ";".
        _id, remaining, text = annotation_line.split("\t", maxsplit=2)
        _type, locations = remaining.split(" ", maxsplit=1)
        return {
            "id": _id,
            "text": text,
            "type": _type,
            "locations": [Brat._get_location(loc) for loc in locations.split(";")],
        }

    @staticmethod
    def _get_event_annotation(annotation_line):
        """Parse an event ("E") annotation line.

        example input:
        E1	MERGE-ORG:T2 Org1:T1 Org2:T3
        """
        _id, remaining = annotation_line.strip().split("\t")
        # The first "type:target" pair is the event type and its trigger span;
        # the rest are the event arguments.
        args = [dict(zip(["type", "target"], a.split(":"))) for a in remaining.split(" ")]
        return {
            "id": _id,
            "type": args[0]["type"],
            "trigger": args[0]["target"],
            "arguments": args[1:],
        }

    @staticmethod
    def _get_relation_annotation(annotation_line):
        """Parse a relation ("R") annotation line.

        example input:
        R1	Origin Arg1:T3 Arg2:T4
        """
        _id, remaining = annotation_line.strip().split("\t")
        _type, remaining = remaining.split(" ", maxsplit=1)
        args = [dict(zip(["type", "target"], a.split(":"))) for a in remaining.split(" ")]
        return {"id": _id, "type": _type, "arguments": args}

    @staticmethod
    def _get_equivalence_relation_annotation(annotation_line):
        """Parse an equivalence ("*") annotation line.

        example input:
        *	Equiv T1 T2 T3
        """
        _, remaining = annotation_line.strip().split("\t")
        parts = remaining.split(" ")
        return {"type": parts[0], "targets": parts[1:]}

    @staticmethod
    def _get_attribute_annotation(annotation_line):
        """Parse an attribute ("A" or "M") annotation line.

        example input (binary: implicit value is True, if present, False otherwise):
        A1	Negation E1
        example input (multi-value: explicit value)
        A2	Confidence E2 L1
        """
        _id, remaining = annotation_line.strip().split("\t")
        parts = remaining.split(" ")
        # if no value is present, it is implicitly "true"
        if len(parts) == 2:
            parts.append("true")
        return {
            "id": _id,
            "type": parts[0],
            "target": parts[1],
            "value": parts[2],
        }

    @staticmethod
    def _get_normalization_annotation(annotation_line):
        """Parse a normalization ("N") annotation line.

        example input:
        N1	Reference T1 Wikipedia:534366	Barack Obama
        """
        _id, remaining, text = annotation_line.split("\t", maxsplit=2)
        _type, target, ref = remaining.split(" ")
        res_id, ent_id = ref.split(":")
        return {
            "id": _id,
            "type": _type,
            "target": target,
            "resource_id": res_id,
            "entity_id": ent_id,
        }

    @staticmethod
    def _get_note_annotation(annotation_line):
        """Parse a note ("#") annotation line.

        example input:
        #1	AnnotatorNotes T1	this annotation is suspect
        """
        _id, remaining, note = annotation_line.split("\t", maxsplit=2)
        _type, target = remaining.split(" ")
        return {
            "id": _id,
            "type": _type,
            "target": target,
            "note": note,
        }

    @staticmethod
    def _read_annotation_file(filename):
        """
        reads a BRAT v1.3 annotations file (see https://brat.nlplab.org/standoff.html)

        Returns a dict with one list per annotation category; unknown id
        prefixes raise ValueError.
        """
        res = {
            "spans": [],
            "events": [],
            "relations": [],
            "equivalence_relations": [],
            "attributions": [],
            "normalizations": [],
            "notes": [],
        }

        # BRAT standoff files are UTF-8; be explicit instead of relying on the
        # locale default encoding.
        with open(filename, encoding="utf-8") as file:
            for i, line in enumerate(file):
                if len(line.strip()) == 0:
                    continue
                # The first character of the id determines the annotation type.
                ann_type = line[0]

                # strip away the new line character
                if line.endswith("\n"):
                    line = line[:-1]

                if ann_type == "T":
                    res["spans"].append(Brat._get_span_annotation(line))
                elif ann_type == "E":
                    res["events"].append(Brat._get_event_annotation(line))
                elif ann_type == "R":
                    res["relations"].append(Brat._get_relation_annotation(line))
                elif ann_type == "*":
                    res["equivalence_relations"].append(
                        Brat._get_equivalence_relation_annotation(line)
                    )
                elif ann_type in ["A", "M"]:
                    res["attributions"].append(Brat._get_attribute_annotation(line))
                elif ann_type == "N":
                    res["normalizations"].append(Brat._get_normalization_annotation(line))
                elif ann_type == "#":
                    res["notes"].append(Brat._get_note_annotation(line))
                else:
                    # Fix: the message previously interpolated a garbled
                    # placeholder instead of the actual file name.
                    raise ValueError(
                        f'unknown BRAT annotation id type: "{line}" (from file {filename} @line {i}). '
                        f"Annotation ids have to start with T (spans), E (events), R (relations), "
                        f"A (attributions), or N (normalizations). See "
                        f"https://brat.nlplab.org/standoff.html for the BRAT annotation file "
                        f"specification."
                    )
        return res

    def _generate_examples(self, files=None, directory=None):
        """Read context (.txt) and annotation (.ann) files.

        ``files`` is a list of base paths (without extension); if None, all
        ``.ann`` files below ``directory`` are used. Yields
        ``(base_name, example_dict)`` pairs.
        """
        if files is None:
            assert (
                directory is not None
            ), "If files is None, directory has to be provided, but it is also None."
            _files = glob.glob(f"{directory}/*.{self.config.ann_file_extension}")
            files = sorted(path.splitext(fn)[0] for fn in _files)

        for filename in files:
            basename = path.basename(filename)
            if (
                self.config.file_name_blacklist is not None
                and basename in self.config.file_name_blacklist
            ):
                logger.info(f"skip annotation file: {basename} (blacklisted)")
                continue

            # Fix: build the paths from the actual base name (the original text
            # had a garbled literal placeholder here, which would have opened a
            # non-existent file).
            ann_fn = f"{filename}.{self.config.ann_file_extension}"
            brat_annotations = Brat._read_annotation_file(ann_fn)

            txt_fn = f"{filename}.{self.config.txt_file_extension}"
            # Fix: use a context manager so the text file handle is closed, and
            # read BRAT text files explicitly as UTF-8.
            with open(txt_fn, encoding="utf-8") as txt_file:
                brat_annotations["context"] = txt_file.read()
            brat_annotations["file_name"] = basename

            yield basename, brat_annotations

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        subdirectory_mapping = self.config.subdirectory_mapping

        # since subclasses of BuilderConfig are not allowed to define
        # attributes without defaults, check here
        assert self.config.url is not None, "data url not specified"

        # if url points to a local directory, just point to that
        if path.exists(self.config.url) and path.isdir(self.config.url):
            data_dir = self.config.url
        # otherwise, download and extract
        else:
            data_dir = dl_manager.download_and_extract(self.config.url)
        # Fix: use the module-level logger (was logging.info, which bypasses
        # this module's logger configuration).
        logger.info(f"load from data dir: {data_dir}")

        # if no subdirectory mapping is provided, ...
        if subdirectory_mapping is None:
            # ... use available subdirectories as split names ...
            subdirs = [f for f in listdir(data_dir) if path.isdir(path.join(data_dir, f))]
            if len(subdirs) > 0:
                subdirectory_mapping = {subdir: subdir for subdir in subdirs}
            else:
                # ... otherwise, default to a single train split with the base directory
                subdirectory_mapping = {"": "train"}

        return [
            SplitGenerator(
                name=split,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "directory": path.join(data_dir, subdir),
                },
            )
            for subdir, split in subdirectory_mapping.items()
        ]