# german_legal_sentences/german_legal_sentences.py
import random

from pathlib import Path
import datasets
from datasets import Value, Sequence, ClassLabel, Features

_CITATION = """\
coming soon
"""

_DESCRIPTION = """\
12
German Legal Sentences (GLS) is an automatically generated training dataset for semantic sentence 
13
matching in the domain in german legal documents. It follows the concept of weak supervision, where 
14
imperfect labels are generated using multiple heuristics. For this purpose we use a combination of 
15
legal citation matching and BM25 similarity. The contained sentences and their citations are parsed 
16
from real judicial decisions provided by [Open Legal Data](http://openlegaldata.io/)
17
"""
18
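# A minimal usage sketch (illustrative; the script path below is a placeholder,
# not a name fixed by this file):
#
#     import datasets
#     gls = datasets.load_dataset("path/to/german_legal_sentences.py", "pairs")
#     print(gls["train"][0]["query.text"])
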
_VERSION = "0.0.2"
_DATA_URL = f"http://lavis.cs.hs-rm.de/storage/german-legal-sentences/GermanLegalSentences_v{_VERSION}.zip"


class GLSConfig(datasets.BuilderConfig):
    """BuilderConfig for German Legal Sentences."""

    def __init__(
        self,
        load_collection,
        load_es_neighbors=None,
        n_es_neighbors=None,
        **kwargs,
    ):
        """BuilderConfig for German Legal Sentences.

        Args:
          load_collection: whether to load the full sentence collection
            (needed to resolve pair and neighbor ids to their texts).
          load_es_neighbors: whether to load the precomputed Elasticsearch
            (BM25) neighbors for each query sentence.
          n_es_neighbors: number of ES neighbors to sample per pair.
          **kwargs: keyword arguments forwarded to super.
        """
        super(GLSConfig, self).__init__(**kwargs)
        self.load_collection = load_collection
        self.load_es_neighbors = load_es_neighbors
        self.n_es_neighbors = n_es_neighbors


class GermanLegalSentences(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        GLSConfig(
            name="sentences",
            load_es_neighbors=False,
            load_collection=False,
            version=datasets.Version(_VERSION, ""),
            description="Just the sentences and their masked references",
        ),
        GLSConfig(
            name="pairs",
            load_es_neighbors=False,
            load_collection=True,
            version=datasets.Version(_VERSION, ""),
            description="Sentence pairs sharing references",
        ),
        GLSConfig(
            name="pairs+es",
            load_es_neighbors=True,
            load_collection=True,
            n_es_neighbors=5,
            version=datasets.Version(_VERSION, ""),
            description="Sentence pairs sharing references plus ES neighbors",
        ),
    ]

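    # Note: with the "pairs+es" config, pairs whose query sentence has fewer
    # than `n_es_neighbors` precomputed neighbors are skipped entirely (see
    # `_generate_pairs` below).
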
    def _features(self):
        if self.config.name == "sentences":
            return Features(
                {
                    "sent_id": Value("uint32"),
                    "doc_id": Value("uint32"),
                    "text": Value("string"),
                    "references": Sequence(
                        {
                            "ref_id": Value("uint32"),
                            "name": Value("string"),
                            "type": ClassLabel(names=["AZ", "LAW"]),
                        }
                    ),
                }
            )
        elif self.config.name == "pairs":
            return Features(
                {
                    "query.sent_id": Value("uint32"),
                    "query.doc_id": Value("uint32"),
                    "query.text": Value("string"),
                    "query.ref_ids": Sequence(Value("uint32")),
                    "related.sent_id": Value("uint32"),
                    "related.doc_id": Value("uint32"),
                    "related.text": Value("string"),
                    "related.ref_ids": Sequence(Value("uint32")),
                }
            )
        elif self.config.name == "pairs+es":
            return Features(
                {
                    "query.sent_id": Value("uint32"),
                    "query.doc_id": Value("uint32"),
                    "query.text": Value("string"),
                    "query.ref_ids": Sequence(Value("uint32")),
                    "related.sent_id": Value("uint32"),
                    "related.doc_id": Value("uint32"),
                    "related.text": Value("string"),
                    "related.ref_ids": Sequence(Value("uint32")),
                    "es_neighbors.text": Sequence(Value("string")),
                    "es_neighbors.sent_id": Sequence(Value("uint32")),
                    "es_neighbors.doc_id": Sequence(Value("uint32")),
                    "es_neighbors.ref_ids": Sequence(Sequence(Value("uint32"))),
                }
            )
        raise ValueError(f"Unknown config name: {self.config.name}")

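    # Illustrative "sentences" record as yielded by `_generate_sentences`
    # (all values invented for demonstration):
    #
    #     {"sent_id": 1, "doc_id": 1, "text": "...",
    #      "references": [{"ref_id": 3, "name": "§ 1 BGB", "type": "LAW"}]}
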
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self._features(),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
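        # `dl_manager.manual_dir` is set when the caller passes `data_dir=...`
        # to `datasets.load_dataset`, allowing a pre-extracted copy of the
        # archive to be used instead of downloading _DATA_URL.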
        if dl_manager.manual_dir:
            data_dir = Path(dl_manager.manual_dir)
        else:
            data_dir = Path(dl_manager.download_and_extract(_DATA_URL))
        collection = _load_collection(data_dir) if self.config.load_collection else None
        sent_ref_map = _load_sent_references(data_dir)
        references = (
            _load_reference_info(data_dir) if self.config.name == "sentences" else None
        )
        es_neighbors = (
            _load_es_neighbors(data_dir) if self.config.load_es_neighbors else None
        )

        gen_kwargs = {}
        for split in ("train", "valid", "test"):
            gen_kwargs[split] = {
                "collection": collection,
                "pair_id_file": data_dir / f"{split}.pairs.tsv",
                "sentence_file": data_dir / f"{split}.sentences.tsv",
                "references": references,
                "sent_ref_map": sent_ref_map,
                "es_neighbors": es_neighbors,
            }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs=gen_kwargs["train"]
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs=gen_kwargs["valid"]
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs=gen_kwargs["test"]
            ),
        ]

    def _generate_examples(self, **kwargs):
        if self.config.name.startswith("pairs"):
            yield from self._generate_pairs(**kwargs)
        elif self.config.name == "sentences":
            yield from self._generate_sentences(**kwargs)
        else:
            raise ValueError(f"Unknown config name: {self.config.name}")

    def _generate_pairs(
        self, pair_id_file, collection, sent_ref_map, es_neighbors, **kwargs
    ):
        # Each line of `pair_id_file` holds two tab-separated sentence ids
        # whose sentences share references. The seed is fixed so that
        # ES-neighbor sampling is reproducible across runs.
        random.seed(17)
        with open(pair_id_file, encoding="utf-8") as r:
            idx = 0
            for line in r:
                stripped = line.rstrip()
                if stripped:
                    a, b = stripped.split("\t")
                    features = {
                        "query.sent_id": int(a),
                        "query.doc_id": int(collection[a]["doc_id"]),
                        "query.text": collection[a]["text"],
                        "query.ref_ids": sent_ref_map[a],
                        "related.sent_id": int(b),
                        "related.doc_id": int(collection[b]["doc_id"]),
                        "related.text": collection[b]["text"],
                        "related.ref_ids": sent_ref_map[b],
                    }
                    if self.config.name == "pairs+es":
                        curr_es_neighbors = es_neighbors.get(a) or []
                        if len(curr_es_neighbors) < self.config.n_es_neighbors:
                            continue

                        es_sent_ids = random.sample(
                            curr_es_neighbors, k=self.config.n_es_neighbors
                        )
                        additional_features = {
                            "es_neighbors.sent_id": [int(i) for i in es_sent_ids],
                            "es_neighbors.doc_id": [
                                int(collection[i]["doc_id"]) for i in es_sent_ids
                            ],
                            "es_neighbors.text": [
                                collection[i]["text"] for i in es_sent_ids
                            ],
                            "es_neighbors.ref_ids": [
                                sent_ref_map[i] for i in es_sent_ids
                            ],
                        }
                        features.update(additional_features)
                    yield idx, features
                    idx += 1

    def _generate_sentences(
        self,
        sentence_file,
        references,
        sent_ref_map,
        **kwargs,
    ):
        # Each line of `sentence_file` holds sentence id, document id, and the
        # sentence text, separated by tabs.
        with open(sentence_file, encoding="utf-8") as r:
            for idx, line in enumerate(r):
                stripped = line.rstrip()
                if stripped == "":
                    continue
                s_id, doc_id, text = stripped.split("\t", maxsplit=2)
                yield idx, {
                    "sent_id": int(s_id),
                    "doc_id": int(doc_id),
                    "text": text,
                    "references": [
                        {
                            "ref_id": int(r_id),
                            "name": references[r_id][1],
                            "type": references[r_id][0],
                        }
                        for r_id in sent_ref_map[s_id]
                    ],
                }


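# Expected layout of the extracted archive, as implied by the readers below:
#     {train,valid,test}.sentences.tsv : sent_id <TAB> doc_id <TAB> text
#     {train,valid,test}.pairs.tsv     : query sent_id <TAB> related sent_id
#     refs.tsv                         : ref_id <TAB> type <TAB> name
#     sent_ref_map.tsv                 : sent_id <TAB> space-separated ref_ids
#     es_neighbors.tsv                 : sent_id <TAB> space-separated sent_ids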
def _load_collection(data_dir):
    collection = {}
    for split in ("train", "valid", "test"):
        with open(data_dir / f"{split}.sentences.tsv", encoding="utf-8") as r:
            for line in r:
                s_id, d_id, sent = line.strip().split("\t", maxsplit=2)
                collection[s_id] = {"doc_id": d_id, "text": sent}
    return collection


def _load_reference_info(data_dir):
    # `refs.tsv` holds one reference per line: reference id, reference type
    # ("AZ" or "LAW"), and the reference name, separated by tabs.
    with open(data_dir / "refs.tsv", encoding="utf-8") as r:
        references = {
            r_id: (r_type, r_name.rstrip())
            for r_id, r_type, r_name in (
                line.split("\t", maxsplit=2) for line in r if len(line) > 2
            )
        }

    return references


def _load_sent_references(data_dir):
    # `sent_ref_map.tsv` maps each sentence id to a space-separated list of
    # the reference ids occurring in that sentence, tab-separated.
    with open(data_dir / "sent_ref_map.tsv", encoding="utf-8") as r:
        sent_ref_map = {
            s_id: r_ids.rstrip().split()
            for s_id, r_ids in (
                line.split("\t", maxsplit=1) for line in r if len(line) > 2
            )
        }
    return sent_ref_map


def _load_es_neighbors(data_dir):
    # `es_neighbors.tsv` maps each sentence id to a space-separated list of
    # its BM25 (Elasticsearch) neighbor sentence ids, tab-separated.
    with open(data_dir / "es_neighbors.tsv", encoding="utf-8") as r:
        es_neighbors = {
            s_id: other_s_ids.rstrip().split()
            for s_id, other_s_ids in (
                line.split("\t", maxsplit=1) for line in r if len(line) > 2
            )
        }
    return es_neighbors