import logging
from typing import Sequence, Set, Tuple, Union

import networkx as nx
from pie_modules.document.processing import (
    RegexPartitioner,
    RelationArgumentSorter,
    TextSpanTrimmer,
)
from pytorch_ie.annotations import BinaryRelation, LabeledMultiSpan, LabeledSpan
from pytorch_ie.core import Document
from pytorch_ie.documents import (
    TextDocumentWithLabeledSpansAndBinaryRelations,
    TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions,
)

from pie_datasets.builders import BratBuilder, BratConfig
from pie_datasets.builders.brat import BratDocumentWithMergedSpans
from pie_datasets.core.dataset import DocumentConvertersType
from pie_datasets.document.processing import Caster, Pipeline

URL = "http://data.dws.informatik.uni-mannheim.de/sci-arg/compiled_corpus.zip"
SPLIT_PATHS = {"train": "compiled_corpus"}

logger = logging.getLogger(__name__)


def _merge_spans_via_relation(
    spans: Sequence[LabeledSpan],
    relations: Sequence[BinaryRelation],
    link_relation_label: str,
    create_multi_spans: bool = True,
) -> Tuple[Union[Set[LabeledSpan], Set[LabeledMultiSpan]], Set[BinaryRelation]]:
    # convert list of relations to a graph to easily calculate connected components to merge
    g = nx.Graph()
    link_relations = []
    other_relations = []
    for rel in relations:
        if rel.label == link_relation_label:
            link_relations.append(rel)
            # never merge spans that do not have the same label
            if (
                not (isinstance(rel.head, LabeledSpan) or isinstance(rel.tail, LabeledSpan))
                or rel.head.label == rel.tail.label
            ):
                g.add_edge(rel.head, rel.tail)
            else:
                logger.debug(
                    f"spans to merge do not have the same label, do not merge them: {rel.head}, {rel.tail}"
                )
        else:
            other_relations.append(rel)

    span_mapping = {}
    connected_components: Set[LabeledSpan]
    for connected_components in nx.connected_components(g):
        # all spans in a connected component have the same label
        label = list(span.label for span in connected_components)[0]
        connected_components_sorted = sorted(connected_components, key=lambda span: span.start)
        if create_multi_spans:
            new_span = LabeledMultiSpan(
                slices=tuple((span.start, span.end) for span in connected_components_sorted),
                label=label,
            )
        else:
            new_span = LabeledSpan(
                start=min(span.start for span in connected_components_sorted),
                end=max(span.end for span in connected_components_sorted),
                label=label,
            )
        for span in connected_components_sorted:
            span_mapping[span] = new_span
    for span in spans:
        if span not in span_mapping:
            if create_multi_spans:
                span_mapping[span] = LabeledMultiSpan(
                    slices=((span.start, span.end),), label=span.label, score=span.score
                )
            else:
                span_mapping[span] = LabeledSpan(
                    start=span.start, end=span.end, label=span.label, score=span.score
                )

    new_spans = set(span_mapping.values())
    new_relations = {
        BinaryRelation(
            head=span_mapping[rel.head],
            tail=span_mapping[rel.tail],
            label=rel.label,
            score=rel.score,
        )
        for rel in other_relations
    }

    return new_spans, new_relations
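

# Illustrative sketch of the merge behavior (the spans, offsets, and label below are
# hypothetical, not taken from the dataset): two "parts_of_same"-linked spans with the same
# label collapse into a single span that covers both fragments.
#
#   spans = [
#       LabeledSpan(start=0, end=10, label="claim"),
#       LabeledSpan(start=20, end=30, label="claim"),
#   ]
#   relations = [BinaryRelation(head=spans[0], tail=spans[1], label="parts_of_same")]
#   merged, remaining = _merge_spans_via_relation(
#       spans, relations, link_relation_label="parts_of_same", create_multi_spans=True
#   )
#   # merged -> {LabeledMultiSpan(slices=((0, 10), (20, 30)), label="claim")}
#   # remaining -> set(), because the only relation was the link relation itself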


class SpansWithRelationsMerger:
    """Merge spans that are connected via a specific relation type.

    Args:
        relation_layer: The name of the layer that contains the relations.
        link_relation_label: The label of the relations that connect the spans.
        result_document_type: The type of the document to return.
        result_field_mapping: A mapping from layer names of the input document to the
            corresponding layer names of the result document.
        create_multi_spans: If True, the merged spans are LabeledMultiSpans, otherwise LabeledSpans.
    """

    def __init__(
        self,
        relation_layer: str,
        link_relation_label: str,
        result_document_type: type[Document],
        result_field_mapping: dict[str, str],
        create_multi_spans: bool = True,
    ):
        self.relation_layer = relation_layer
        self.link_relation_label = link_relation_label
        self.create_multi_spans = create_multi_spans
        self.result_document_type = result_document_type
        self.result_field_mapping = result_field_mapping

    def __call__(self, document: Document) -> Document:
        relations: Sequence[BinaryRelation] = document[self.relation_layer]
        spans: Sequence[LabeledSpan] = document[self.relation_layer].target_layer

        new_spans, new_relations = _merge_spans_via_relation(
            spans=spans,
            relations=relations,
            link_relation_label=self.link_relation_label,
            create_multi_spans=self.create_multi_spans,
        )

        result = document.copy(with_annotations=False).as_type(new_type=self.result_document_type)
        span_layer_name = document[self.relation_layer].target_name
        result_span_layer_name = self.result_field_mapping[span_layer_name]
        result_relation_layer_name = self.result_field_mapping[self.relation_layer]
        result[result_span_layer_name].extend(new_spans)
        result[result_relation_layer_name].extend(new_relations)

        return result
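

# Usage sketch (a minimal, hypothetical example of applying the merger on its own; in this
# module it is only used as the "merge_spans" step of the pipelines built below):
#
#   merger = SpansWithRelationsMerger(
#       relation_layer="relations",
#       link_relation_label="parts_of_same",
#       result_document_type=TextDocumentWithLabeledSpansAndBinaryRelations,
#       result_field_mapping={"spans": "labeled_spans", "relations": "binary_relations"},
#       create_multi_spans=False,
#   )
#   converted_document = merger(brat_document)  # brat_document: a BratDocumentWithMergedSpans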


def get_common_pipeline_steps(target_document_type: type[Document]) -> dict:
    return dict(
        cast=Caster(
            document_type=target_document_type,
            field_mapping={"spans": "labeled_spans", "relations": "binary_relations"},
        ),
        trim_adus=TextSpanTrimmer(layer="labeled_spans"),
        sort_symmetric_relation_arguments=RelationArgumentSorter(
            relation_layer="binary_relations",
            label_whitelist=["parts_of_same", "semantically_same"],
        ),
    )


def get_common_pipeline_steps_with_merge_multi_spans(
    target_document_type: type[Document],
) -> dict:
    return dict(
        merge_spans=SpansWithRelationsMerger(
            relation_layer="relations",
            link_relation_label="parts_of_same",
            create_multi_spans=False,
            result_document_type=target_document_type,
            result_field_mapping={"spans": "labeled_spans", "relations": "binary_relations"},
        ),
        trim_adus=TextSpanTrimmer(layer="labeled_spans"),
        sort_symmetric_relation_arguments=RelationArgumentSorter(
            relation_layer="binary_relations",
            label_whitelist=["parts_of_same", "semantically_same"],
        ),
    )


class SciArgConfig(BratConfig):
    def __init__(
        self,
        name: str,
        resolve_parts_of_same: bool = False,
        **kwargs,
    ):
        super().__init__(name=name, merge_fragmented_spans=True, **kwargs)
        self.resolve_parts_of_same = resolve_parts_of_same


class SciArg(BratBuilder):
    BASE_DATASET_PATH = "DFKI-SLT/brat"
    BASE_DATASET_REVISION = "844de61e8a00dc6a93fc29dc185f6e617131fbf1"

    # Overwrite the default config to merge the span fragments. The span fragments in SciArg
    # stem only from newline splits, so merging them is safe. Actual (discontinuous) span
    # fragments are annotated via "parts_of_same" relations instead.
    BUILDER_CONFIGS = [
        SciArgConfig(name=BratBuilder.DEFAULT_CONFIG_NAME),
        SciArgConfig(name="resolve_parts_of_same", resolve_parts_of_same=True),
    ]
    DOCUMENT_TYPES = {
        BratBuilder.DEFAULT_CONFIG_NAME: BratDocumentWithMergedSpans,
        "resolve_parts_of_same": BratDocumentWithMergedSpans,
    }

    # we need to add None to the list of dataset variants to support the default dataset variant
    BASE_BUILDER_KWARGS_DICT = {
        dataset_variant: {"url": URL, "split_paths": SPLIT_PATHS}
        for dataset_variant in ["default", "resolve_parts_of_same", None]
    }

    @property
    def document_converters(self) -> DocumentConvertersType:
        regex_partitioner = RegexPartitioner(
            partition_layer_name="labeled_partitions",
            pattern="<([^>/]+)>.*</\\1>",
            label_group_id=1,
            label_whitelist=["Title", "Abstract", "H1"],
            skip_initial_partition=True,
            strip_whitespace=True,
        )
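        # Note: the pattern relies on the pseudo-XML section markup of the compiled corpus. For
        # example, a (hypothetical) passage like "<Abstract>...</Abstract>" would yield one
        # partition labeled "Abstract" via the backreference to the opening tag name.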
        if not self.config.resolve_parts_of_same:
            return {
                TextDocumentWithLabeledSpansAndBinaryRelations: Pipeline(
                    **get_common_pipeline_steps(TextDocumentWithLabeledSpansAndBinaryRelations)
                ),
                TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions: Pipeline(
                    **get_common_pipeline_steps(
                        TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions
                    ),
                    add_partitions=regex_partitioner,
                ),
            }
        else:
            return {
                TextDocumentWithLabeledSpansAndBinaryRelations: Pipeline(
                    **get_common_pipeline_steps_with_merge_multi_spans(
                        TextDocumentWithLabeledSpansAndBinaryRelations
                    )
                ),
                TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions: Pipeline(
                    **get_common_pipeline_steps_with_merge_multi_spans(
                        TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions
                    ),
                    add_partitions=regex_partitioner,
                ),
                # TODO: add TextDocumentWithLabeledMultiSpansAndBinaryRelations
                # TODO: add TextDocumentWithLabeledMultiSpansBinaryRelationsAndLabeledPartitions
            }
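

# Loading sketch (a hypothetical usage example; it assumes this builder is exposed as a
# Hugging Face dataset script, e.g. under "pie/sciarg", and that pie_datasets provides
# load_dataset and to_document_type as described in its documentation):
#
#   from pie_datasets import load_dataset
#
#   dataset = load_dataset("pie/sciarg", name="resolve_parts_of_same")
#   converted = dataset.to_document_type(TextDocumentWithLabeledSpansAndBinaryRelations)
#
# to_document_type selects the matching Pipeline from the document_converters property above
# and applies its steps to every document in the split.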