timo-pierre-schrader committed on
Commit c5d1618 · verified · 1 Parent(s): 10d8099

Delete loading script

Files changed (1)
  1. MuLMS.py +0 -752
MuLMS.py DELETED
@@ -1,752 +0,0 @@
# Experiment resources related to the MuLMS corpus.
# Copyright (c) 2023 Robert Bosch GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# 24 October 2023 - Modified by Timo Schrader (find all changes in CHANGELOG)

"""
This module contains the HuggingFace dataset reader for the "Multi-Layer Materials Science Corpus (MuLMS)"
"""

from dataclasses import dataclass
from os import listdir
from os.path import exists, join

import datasets
import pandas as pd
from puima.collection_utils import DocumentCollection

SENT_TYPE = "de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence"
MATSCI_SENT_TYPE = "webanno.custom.MatSci_Sentence"
TOKEN_TYPE = "de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token"
SKIPPING_TYPE = "webanno.custom.MatSci_Skipping"
ENTITY_TYPE = "webanno.custom.MatSci_Entity"
RELATION_TYPE = "webanno.custom.MatSci_Relations"
PASSAGE_TYPE = "webanno.custom.MatSci_Passage"
DOCUMENT_METADATA_TYPE = "de.tudarmstadt.ukp.dkpro.core.api.metadata.type.DocumentMetaData"


def get_token_indices_for_annot(annot, sent_tokens, doc):
    """
    Retrieves the indices of the first and last token within the list of sentence tokens
    (sent_tokens) covered by the annotation annot.
    param: annot - puima Annotation object
    param: sent_tokens - list of puima Annotation objects (tokens of the sentence)
    param: doc - puima document the annotation belongs to
    """
    indices = [None, None]
    for annot_token in doc.select_covered(TOKEN_TYPE, annot):  # noqa: F405
        token_index = sent_tokens.index(annot_token)
        if indices[0] is None or indices[0] > token_index:
            indices[0] = token_index
        if indices[1] is None or indices[1] < token_index:
            indices[1] = token_index
    return tuple(indices)


def get_token_index_for_annot_if_subtoken(annot, sent_tokens, doc):
    """
    Retrieves the index of the token covering the (sub-token) annotation annot within
    the list of sentence tokens (sent_tokens). Used as a fallback when the annotation
    does not span complete tokens.
    param: annot - puima Annotation object
    param: sent_tokens - list of puima Annotation objects (tokens of the sentence)
    param: doc - puima document the annotation belongs to
    """
    annot_token = next(doc.select_covering(TOKEN_TYPE, annot))  # noqa: F405
    token_index = sent_tokens.index(annot_token)
    return (token_index, token_index)


az_content_labels: list = [
    "Experiment",
    "Results",
    "Exp_Preparation",
    "Exp_Characterization",
    "Background_PriorWork",
    "Explanation",
    "Conclusion",
    "Motivation",
    "Background",
]
az_structure_labels: list = ["Metadata", "Caption", "Heading", "Abstract"]

meas_labels: list = ["MEASUREMENT", "QUAL_MEASUREMENT", "O"]

mulms_ne_labels: list = [
    "MAT",
    "NUM",
    "VALUE",
    "UNIT",
    "PROPERTY",
    "CITE",
    "TECHNIQUE",
    "RANGE",
    "INSTRUMENT",
    "SAMPLE",
    "FORM",
    "DEV",
    "MEASUREMENT",
]

rel_labels: list = [
    "hasForm",
    "measuresProperty",
    "usedAs",
    "conditionProperty",
    "conditionSampleFeatures",
    "usesTechnique",
    "conditionEnvironment",
    "propertyValue",
    "usedIn",
    "conditionInstrument",
    "dopedBy",
    "takenFrom",
    "usedTogether",
]

ne_labels_set = set(mulms_ne_labels)
all_az_labels = set(az_content_labels) | set(az_structure_labels)
meas_labels_set = set(meas_labels)
rel_label_set: set = set(rel_labels)

- _CITATION = """\
122
- @InProceedings{schrader-etal-2023-mulms,
123
- title = {MuLMS-AZ: An Argumentative Zoning Dataset for the Materials Science Domain},
124
- author={Timo Pierre Schrader, Teresa Bürkle, Sophie Henning, Sherry Tan, Matteo Finco, Stefan Grünewald, Maira Indrikova, Felix Hildebrand, Annemarie Friedrich
125
- },
126
- year={2023}
127
- },
128
- @InProceedings{schrader-etal-2023-mulms,
129
- title = {MuLMS: A Multi-Layer Annotated Text Corpus for Information Extraction in the Materials Science Domain},
130
- author={Timo Pierre Schrader, Matteo Finco, Stefan Grünewald, Felix Hildebrand, Annemarie Friedrich
131
- },
132
- year={2023}
133
- }
134
- """
135
-
136
- _DESCRIPTION = """\
137
- This dataset represents the Multi-Layer Material Science (MuLMS) corpus.
138
- It consists of 50 thoroughly annotated documents from the materials science domain and
139
- provides annotations for named entities, argumentative zoning (AZ), relation extraction,
140
- measurement classification and citation context retrieval. Please refer to our papers for
141
- more details about the MuLMS corpus.
142
- """
143
-
144
- _HOMEPAGE = "https://github.com/boschresearch/mulms-az-codi2023"
145
-
146
- _LICENSE = "AGPL-3"
147
-
148
- _URLS = "https://github.com/boschresearch/mulms-az-codi2023/raw/refs/heads/main/data/mulms_corpus.zip" # "Path to MuLMS-AZ files"
149
-
150
-
151
@dataclass
class MuLMSDatasetBuilderConfig(datasets.BuilderConfig):
    """
    Config class for the dataset class.
    """

    replace_heading_AZ_labels: bool = True
    remove_figure_and_table_labels: bool = True


class MuLMSDataset(datasets.GeneratorBasedBuilder):
    """This dataset represents the Multi-Layer Material Science Corpus with 50 documents across multiple domains."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = MuLMSDatasetBuilderConfig

    BUILDER_CONFIGS = [
        MuLMSDatasetBuilderConfig(
            name="MuLMS_Corpus",
            version=VERSION,
            description="This part of the dataset covers all annotations.",
        ),
        datasets.BuilderConfig(
            name="NER_Dependencies",
            version=VERSION,
            description="This part of the dataset represents Named Entities as dependencies (returned in CONLL format).",
        ),
    ]

    DEFAULT_CONFIG_NAME = "MuLMS_Corpus"

    AZ_HEADING_REPLACEMENT_LABELS = [
        "Supporting Information",
        "Author Contribution",
        "Confict of Interest",
        "Acknowledgment",
    ]

    def _info(self) -> datasets.DatasetInfo:
        """
        Provides information about this dataset.

        Returns:
            datasets.DatasetInfo
        """
        if self.config.name == "default":
            self.config.name = self.DEFAULT_CONFIG_NAME
        if self.config.name == "MuLMS_Corpus":
            features: datasets.Features = datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "beginOffset": datasets.Value("int32"),
                    "endOffset": datasets.Value("int32"),
                    "AZ_labels": datasets.Value("string"),
                    "Measurement_label": datasets.Value("string"),
                    "NER_labels": datasets.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "id": datasets.Value("int32"),
                            "value": datasets.Value("string"),
                            "begin": datasets.Value("string"),
                            "end": datasets.Value("string"),
                            "tokenIndices": datasets.Sequence(datasets.Value("int32")),
                        }
                    ),
                    "NER_labels_BILOU": datasets.Sequence(datasets.Value("string")),
                    "relations": datasets.Sequence(
                        {
                            "ne_id_gov": datasets.Value("int32"),
                            "ne_id_dep": datasets.Value("int32"),
                            "label": datasets.Value("string"),
                        }
                    ),
                    "docFileName": datasets.Value("string"),
                    "data_split": datasets.Value("string"),
                    "category": datasets.Value("string"),
                }
            )
        elif self.config.name == "NER_Dependencies":
            features: datasets.Features = datasets.Features(
                {
                    "ID": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                    "token_id": datasets.Value("int32"),
                    "token_text": datasets.Value("string"),
                    "NE_Dependencies": datasets.Value("string"),
                    "data_split": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> list:
        """
        Downloads files from URL or reads them from the file system and provides _generate_examples
        with necessary information.

        Args:
            dl_manager: Handles data retrieval

        Returns:
            list: Information about files and splits
        """

        data_files: list = dl_manager.download_and_extract(_URLS)
        self.data_dir: str = join(data_files, "mulms_corpus")
        data_files = listdir(join(self.data_dir, "xmi"))
        assert exists(
            join(self.data_dir, "MuLMS_Corpus_Metadata.csv")
        ), "MuLMS_Corpus_Metadata.csv is missing."

        if "/" in data_files[0]:
            data_files = [f.split("/")[-1] for f in data_files]
        if "\\" in data_files[0]:
            data_files = [f.split("\\")[-1] for f in data_files]

        metadata_df: pd.DataFrame = pd.read_csv(
            join(self.data_dir, "MuLMS_Corpus_Metadata.csv")
        )
        train_files: list = sorted(
            [
                f
                for f in data_files
                if any(
                    name in f
                    for name in list(metadata_df[metadata_df["set"].str.contains("train")]["name"])
                )
            ]
        )
        dev_files: list = sorted(
            [
                f
                for f in data_files
                if any(
                    name in f for name in list(metadata_df[metadata_df["set"] == "dev"]["name"])
                )
            ]
        )
        test_files: list = sorted(
            [
                f
                for f in data_files
                if any(
                    name in f for name in list(metadata_df[metadata_df["set"] == "test"]["name"])
                )
            ]
        )

        if self.config.name == "MuLMS_Corpus":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "dir": join(self.data_dir, "xmi"),
                        "files": train_files,
                        "data_split": metadata_df[metadata_df["set"].str.contains("train")][
                            ["name", "set", "category"]
                        ],
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "dir": join(self.data_dir, "xmi"),
                        "files": dev_files,
                        "data_split": metadata_df[metadata_df["set"] == "dev"][
                            ["name", "set", "category"]
                        ],
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "dir": join(self.data_dir, "xmi"),
                        "files": test_files,
                        "data_split": metadata_df[metadata_df["set"] == "test"][
                            ["name", "set", "category"]
                        ],
                    },
                ),
            ]

        elif self.config.name == "NER_Dependencies":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "dir": join(self.data_dir, "ne_dependencies_conll"),
                        "files": [
                            "ne_deps_train1.conllu",
                            "ne_deps_train2.conllu",
                            "ne_deps_train3.conllu",
                            "ne_deps_train4.conllu",
                            "ne_deps_train5.conllu",
                        ],
                        "data_split": None,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "dir": join(self.data_dir, "ne_dependencies_conll"),
                        "files": ["ne_deps_dev.conllu"],
                        "data_split": None,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "dir": join(self.data_dir, "ne_dependencies_conll"),
                        "files": ["ne_deps_test.conllu"],
                        "data_split": None,
                    },
                ),
            ]

    def _generate_examples(self, dir: str, files: list, data_split: pd.DataFrame):
        """
        Yields the data during runtime.

        Args:
            dir (str): Path to downloaded or local files.
            files (list): List of filenames corresponding to the current split; must be contained within "dir"
            data_split (pd.DataFrame): Category and train/dev/test split info for each document

        Yields:
            tuple: Yields document ID and dictionary with current data sample
        """

        if self.config.name == "MuLMS_Corpus":

            doc_coll: DocumentCollection = DocumentCollection(xmi_dir=dir, file_list=files)

            split_info: str = None
            category_info: str = None

            for doc_name in doc_coll.docs:
                doc = doc_coll.docs[doc_name]
                for sent_id, sent_annot in enumerate(
                    doc.select_annotations(SENT_TYPE)  # noqa: F405
                ):
                    sent_text = doc.get_covered_text(sent_annot)

                    # Argumentative Zoning labels
                    az_labels = set()
                    for matsci_sent in doc.select_covered(
                        MATSCI_SENT_TYPE, sent_annot  # noqa: F405
                    ):
                        content_info = matsci_sent.get_feature_value("ContentInformation")
                        struct_info = matsci_sent.get_feature_value("StructureInformation")
                        az_labels.add(content_info)
                        az_labels.add(struct_info)
                    if "None" in az_labels:
                        az_labels.remove("None")
                    if None in az_labels:
                        az_labels.remove(None)
                    # Extract Measurement related label
                    sent_meas_label = list(set.intersection(az_labels, meas_labels))
                    if len(sent_meas_label) == 2:
                        sent_meas_label = "MEASUREMENT"
                    elif len(sent_meas_label) == 1:
                        sent_meas_label = sent_meas_label[0]
                    else:
                        sent_meas_label = "O"
                    # Remove AZ labels that are not in structure / content tags defined
                    az_labels = list(
                        set.intersection(all_az_labels, az_labels)
                    )  # keep only valid labels

                    if len(az_labels) == 0:
                        continue

                    # Tokens
                    sent_tokens = list(doc.select_covered(TOKEN_TYPE, sent_annot))  # noqa: F405
                    sent_token_list = [0] * len(sent_tokens)

                    token2idx = {}
                    for i, token in enumerate(sent_tokens):
                        token2idx[token.begin] = i
                        sent_token_list[i] = doc.get_covered_text(token)  # token text

                    # Named Entity annotations
                    sent_offset: int = sent_annot.begin
                    ner_labels: list = []
                    ner_labels_duplicate_lookup: dict = (
                        dict()
                    )  # Used to detect duplicate Named Entity annotations
                    ne_annot2id: dict = {}
                    for ent_annot in doc.select_covered(ENTITY_TYPE, sent_annot):  # noqa: F405
                        if ent_annot.get_feature_value("implicitEntity") is None:
                            label = ent_annot.get_feature_value("value")
                            if label in ne_labels_set:  # filter by applicable labels
                                # retrieve token indices
                                ent_indices = get_token_indices_for_annot(
                                    ent_annot, sent_tokens, doc
                                )
                                if (
                                    None in ent_indices
                                ):  # happens if entity annotation is subtoken : choose covering token
                                    try:
                                        ent_indices = get_token_index_for_annot_if_subtoken(
                                            ent_annot, sent_tokens, doc
                                        )
                                    except StopIteration:
                                        pass
                                    except ValueError:
                                        pass
                                if None in ent_indices:
                                    continue

                                try:
                                    ne_annot2id[ent_annot] = ent_annot.id
                                    ne_dict: dict = {
                                        "text": doc.get_covered_text(ent_annot),
                                        "id": ent_annot.id,
                                        "value": label,
                                        "begin": ent_annot.begin - sent_offset,
                                        "end": ent_annot.end - sent_offset,
                                        "tokenIndices": ent_indices,
                                    }  # index of first + last token of the NE
                                    if (
                                        not tuple(
                                            [ne_dict["value"], ne_dict["begin"], ne_dict["end"]]
                                        )
                                        in ner_labels_duplicate_lookup.keys()
                                    ):
                                        ner_labels.append(ne_dict)
                                        ner_labels_duplicate_lookup[
                                            tuple(
                                                [
                                                    ne_dict["value"],
                                                    ne_dict["begin"],
                                                    ne_dict["end"],
                                                ]
                                            )
                                        ] = ent_annot.id
                                    else:
                                        ne_annot2id[ent_annot] = ner_labels_duplicate_lookup[
                                            tuple(
                                                [
                                                    ne_dict["value"],
                                                    ne_dict["begin"],
                                                    ne_dict["end"],
                                                ]
                                            )
                                        ]
                                except KeyError:
                                    pass

                    # Creating Nested Named Entity BIO Labels
                    B: str = "B-{0}"
                    I: str = "I-{0}"
                    L: str = "L-{0}"
                    O: str = "O"
                    U: str = "U-{0}"
                    ner_labels.sort(
                        key=lambda x: (x["tokenIndices"][0], -x["tokenIndices"][1])
                    )  # Work from left to right and prioritize longer strings
                    nested_bilou_labels: list = [O] * len(sent_tokens)
                    if len(ner_labels) > 0:
                        for i in range(len(ner_labels)):
                            begin_idx: int = ner_labels[i]["tokenIndices"][0]
                            end_idx: int = ner_labels[i]["tokenIndices"][1]

                            # Check whether there are already two NE annotation layers within this span
                            skip_current_entity: bool = False
                            for j in range(begin_idx, end_idx + 1):
                                if (
                                    nested_bilou_labels[j].count("+") == 2
                                ):  # Already 3 annotations connected via "+"
                                    skip_current_entity = True
                                    break

                            if skip_current_entity:
                                continue

                            tag: str = ner_labels[i]["value"]

                            # Case of Unit Length Tag
                            if begin_idx == end_idx:
                                if nested_bilou_labels[begin_idx] == O:
                                    nested_bilou_labels[begin_idx] = U.format(tag)
                                else:
                                    nested_bilou_labels[begin_idx] += "+" + U.format(tag)
                                continue

                            # Tags that span over more than one token
                            if nested_bilou_labels[begin_idx] == O:
                                nested_bilou_labels[begin_idx] = B.format(tag)
                            else:
                                nested_bilou_labels[begin_idx] += "+" + B.format(tag)

                            # Append all inside tags
                            for j in range(begin_idx + 1, end_idx + 1):
                                if j < end_idx:
                                    if nested_bilou_labels[j] == O:
                                        nested_bilou_labels[j] = I.format(tag)
                                    else:
                                        nested_bilou_labels[j] += "+" + I.format(tag)
                                else:
                                    if nested_bilou_labels[j] == O:
                                        nested_bilou_labels[j] = L.format(tag)
                                    else:
                                        nested_bilou_labels[j] += "+" + L.format(tag)

                    # positive relation instances
                    rel_labels: list = []
                    for rel_annot in doc.select_covered(RELATION_TYPE, sent_annot):  # noqa: F405
                        label: str = rel_annot.get_feature_value("RelationType")

                        gov_annot = rel_annot.get_feature_value("Governor", True)
                        dep_annot = rel_annot.get_feature_value("Dependent", True)

                        gov_label = gov_annot.get_feature_value("value")

                        if (
                            label in rel_label_set
                        ):  # only consider annotation if in selected set of relations

                            # --- Adding transitive links --- #
                            # conditionProperty + propertyValue --> conditionPropertyValue
                            # measuresProperty + propertyValue --> measuresPropertyValue
                            # Note that this will also happen when the intermediate entity is implicit!
                            if gov_label == "MEASUREMENT" and label in {
                                "conditionProperty",
                                "measuresProperty",
                            }:
                                for rel_annot2 in doc.select_covered(
                                    RELATION_TYPE, sent_annot  # noqa: F405
                                ):
                                    label2: str = rel_annot2.get_feature_value("RelationType")
                                    gov_annot2 = rel_annot2.get_feature_value("Governor", True)
                                    dep_annot2 = rel_annot2.get_feature_value("Dependent", True)

                                    if label2 == "propertyValue" and gov_annot2 == dep_annot:
                                        if label == "conditionProperty":
                                            transitiveLabel = "conditionPropertyValue"
                                        elif label == "measuresProperty":
                                            transitiveLabel = "measuresPropertyValue"
                                        else:
                                            assert False

                                        try:
                                            rel_labels.append(
                                                {
                                                    "ne_id_gov": ne_annot2id[gov_annot],
                                                    "ne_id_dep": ne_annot2id[dep_annot2],
                                                    "label": transitiveLabel,
                                                }
                                            )
                                        except KeyError:
                                            continue
                            # --- End of adding transitive links --- #

                            if (
                                gov_annot.get_feature_value("value") not in ne_labels_set
                                or dep_annot.get_feature_value("value") not in ne_labels_set
                            ):
                                # only considering relations between "valid" NE types for now
                                continue
                            if gov_annot is dep_annot:
                                continue
                            if (
                                gov_annot.begin == dep_annot.begin
                                and gov_annot.end == dep_annot.end
                            ):
                                # same span, continue
                                continue

                            if gov_annot not in ne_annot2id:
                                # check if it's in a different sentence
                                if doc.get_covered_text(dep_annot) == "nanoparticle-type":
                                    continue
                                sent_list2 = list(
                                    doc.select_covering(SENT_TYPE, gov_annot)  # noqa: F405
                                )
                                try:
                                    sent_list2.remove(sent_annot)
                                except ValueError:
                                    pass
                                if len(sent_list2) == 0:
                                    continue
                                sent_annot2 = sent_list2[0]
                                if sent_annot2 is not sent_annot:
                                    # gov in different sentence, skipping cross-sentence links
                                    continue
                            if dep_annot not in ne_annot2id:
                                sent_list2 = list(
                                    doc.select_covering(SENT_TYPE, dep_annot)  # noqa: F405
                                )
                                try:
                                    sent_list2.remove(sent_annot)
                                except ValueError:
                                    pass
                                if len(sent_list2) == 0:
                                    continue
                                sent_annot2 = sent_list2[0]
                                if sent_annot2 != sent_annot:
                                    # dep in different sentence, skipping cross-sentence links
                                    continue

                            if gov_annot not in ne_annot2id:
                                if gov_annot.get_feature_value("valueType") == "implicit":
                                    # skip this case, implicit PROPERTY
                                    continue
                                assert False
                            if dep_annot not in ne_annot2id:
                                assert False
                            rel_labels.append(
                                {
                                    "ne_id_gov": ne_annot2id[gov_annot],
                                    "ne_id_dep": ne_annot2id[dep_annot],
                                    "label": label,
                                }
                            )

                    if split_info is None:
                        split_info = data_split[data_split["name"] == doc_name]["set"].values[0]
                        category_info = data_split[data_split["name"] == doc_name][
                            "category"
                        ].values[0]

                    # Iterator yields data sample sentence-wise
                    yield doc_name + "/" + str(sent_id), {
                        "doc_id": doc_name,
                        "sentence": sent_text,
                        "tokens": sent_token_list,
                        "beginOffset": sent_annot.begin,
                        "endOffset": sent_annot.end,
                        "AZ_labels": az_labels,
                        "Measurement_label": sent_meas_label,
                        "NER_labels": ner_labels,
                        "NER_labels_BILOU": nested_bilou_labels,
                        # [(ne_index_in_list, ne_index_in_list, relation_name)]
                        "relations": rel_labels,
                        "docFileName": doc_name,
                        "data_split": split_info,
                        "category": category_info,
                    }

                split_info = None
                category_info = None
- elif self.config.name == "NER_Dependencies":
726
- id: int = 0
727
- sent_id: int = 0
728
- sent_text: str = None
729
- for i, f in enumerate(files):
730
- split_info: str = (
731
- f"train{i+1}" if "train" in f else (
732
- "dev" if "dev" in f else "test")
733
- )
734
- with open(join(dir, f), mode="r", encoding="utf-8") as cf:
735
- conll_lines: list[str] = cf.read().splitlines()
736
- for line in conll_lines:
737
- if line.startswith("#"):
738
- sent_text = line.split("# text = ")[-1]
739
- sent_id += 1
740
- continue
741
- elif line == "":
742
- continue
743
- t_id, t_text, deps = line.split("\t")
744
- yield id, {
745
- "ID": sent_id,
746
- "sentence": sent_text,
747
- "token_id": t_id,
748
- "token_text": t_text,
749
- "NE_Dependencies": deps,
750
- "data_split": split_info,
751
- }
752
- id += 1
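
For reference, the deleted file was a standard `datasets` loading script exposing two configurations, "MuLMS_Corpus" and "NER_Dependencies". The sketch below shows how such a script is typically invoked; it is an illustration added for this page, not part of the commit, and the local path as well as the `trust_remote_code` flag (only required by newer versions of the `datasets` library for script-based loaders) are assumptions.

# Minimal usage sketch for the (now deleted) script-based loader.
# "path/to/MuLMS.py" is a placeholder for a local copy of the script.
import datasets

# Full multi-layer corpus: one example per sentence with AZ, measurement,
# NER (nested BILOU) and relation annotations.
mulms = datasets.load_dataset(
    "path/to/MuLMS.py",
    name="MuLMS_Corpus",
    trust_remote_code=True,
)

# Named-entity dependency view in CoNLL-style token records.
ner_deps = datasets.load_dataset(
    "path/to/MuLMS.py",
    name="NER_Dependencies",
    trust_remote_code=True,
)

print(mulms["train"][0]["AZ_labels"], mulms["train"][0]["NER_labels_BILOU"][:5])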
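
The `NER_labels_BILOU` column built in `_generate_examples` joins up to three nested entity layers per token with "+" (for example, a token that both ends a MAT span and is a single-token NUM entity). A minimal sketch for splitting such tags back into per-token tag lists, assuming that column layout (the example labels are illustrative, not taken from the corpus):

# Sketch: split the "+"-joined nested BILOU tags back into per-token tag lists.
def split_nested_bilou(labels):
    """Turn ['B-MAT+U-NUM', 'L-MAT', 'O'] into [['B-MAT', 'U-NUM'], ['L-MAT'], ['O']]."""
    return [tag.split("+") for tag in labels]

print(split_nested_bilou(["B-MAT+U-NUM", "L-MAT", "O"]))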