Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: German
Size: n<1K
License:
File size: 6,453 Bytes
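A minimal loading sketch with the Hugging Face datasets library is shown below. The config names ("all" plus one per court) come from the script's BUILDER_CONFIGS; note that recent datasets releases may additionally require trust_remote_code=True for script-based datasets, and that the script itself flags this dataset as deprecated in favour of 'elenanereiss/german-ler'.

from datasets import load_dataset

# "all" is the default config; single-court configs such as "bgh" also work.
ds = load_dataset("german_legal_entity_recognition", "all")
print(ds["train"][0])  # {"id": "0", "tokens": [...], "ner_tags": [...]}

The loading script itself: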
import os
import warnings
import datasets
_DESCRIPTION = """\
A dataset for fine-grained named entity recognition in German legal documents,
built from decisions of seven German federal courts and annotated in BIO format
with 19 entity classes.
"""
_HOMEPAGE_URL = "https://github.com/elenanereiss/Legal-Entity-Recognition"
_CITATION = """\
@inproceedings{leitner2019fine,
author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
title = {{Fine-grained Named Entity Recognition in Legal Documents}},
booktitle = {Semantic Systems. The Power of AI and Knowledge
Graphs. Proceedings of the 15th International Conference
(SEMANTiCS 2019)},
year = 2019,
editor = {Maribel Acosta and Philippe Cudré-Mauroux and Maria
Maleshkova and Tassilo Pellegrini and Harald Sack and York
Sure-Vetter},
keywords = {aip},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
number = {11702},
address = {Karlsruhe, Germany},
month = 9,
note = {10/11 September 2019},
pages = {272--287},
pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
"""
_DATA_URL = "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip"
_VERSION = "1.0.0"
# Seven German federal courts: Bundesarbeitsgericht (bag), Bundesfinanzhof (bfh),
# Bundesgerichtshof (bgh), Bundespatentgericht (bpatg), Bundessozialgericht (bsg),
# Bundesverfassungsgericht (bverfg), Bundesverwaltungsgericht (bverwg).
_COURTS = ["bag", "bfh", "bgh", "bpatg", "bsg", "bverfg", "bverwg"]
_COURTS_FILEPATHS = {court: f"{court}.conll" for court in _COURTS}  # e.g. "bag" -> "bag.conll"
_ALL = "all"

class GermanLegalEntityRecognitionConfig(datasets.BuilderConfig):
    """BuilderConfig selecting which courts' files to load."""

    def __init__(self, *args, courts=None, **kwargs):
        super().__init__(*args, version=datasets.Version(_VERSION, ""), **kwargs)
        self.courts = courts

    @property
    def filepaths(self):
        # Map the selected court names to their .conll files inside the archive.
        return [_COURTS_FILEPATHS[court] for court in self.courts]


class GermanLegalEntityRecognition(datasets.GeneratorBasedBuilder):
    # One config per court, plus an "all" config covering every court.
    BUILDER_CONFIGS = [
        GermanLegalEntityRecognitionConfig(name=court, courts=[court], description=f"Court. {court}.")
        for court in _COURTS
    ] + [GermanLegalEntityRecognitionConfig(name=_ALL, courts=_COURTS, description="All courts included.")]
    BUILDER_CONFIG_CLASS = GermanLegalEntityRecognitionConfig
    DEFAULT_CONFIG_NAME = _ALL

    def _info(self):
        warnings.warn(
            "Dataset 'german_legal_entity_recognition' is deprecated and will be deleted. Use 'elenanereiss/german-ler' instead.",
            FutureWarning,
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # BIO tagging scheme: a B-/I- tag pair for each of the 19
                    # entity classes, plus the outside tag "O".
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-AN",
                                "B-EUN",
                                "B-GRT",
                                "B-GS",
                                "B-INN",
                                "B-LD",
                                "B-LDS",
                                "B-LIT",
                                "B-MRK",
                                "B-ORG",
                                "B-PER",
                                "B-RR",
                                "B-RS",
                                "B-ST",
                                "B-STR",
                                "B-UN",
                                "B-VO",
                                "B-VS",
                                "B-VT",
                                "I-AN",
                                "I-EUN",
                                "I-GRT",
                                "I-GS",
                                "I-INN",
                                "I-LD",
                                "I-LDS",
                                "I-LIT",
                                "I-MRK",
                                "I-ORG",
                                "I-PER",
                                "I-RR",
                                "I-RS",
                                "I-ST",
                                "I-STR",
                                "I-UN",
                                "I-VO",
                                "I-VS",
                                "I-VT",
                                "O",
                            ]
                        )
                    ),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The source archive ships everything as a single, undivided train split.
        path = dl_manager.download_and_extract(_DATA_URL)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"datapath": path})]

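    # Input format sketch (tokens and tags below are illustrative, not taken
    # from the data): each .conll file holds one whitespace-separated
    # "token label" pair per line, and a blank line ends a sentence, e.g.
    #
    #   Der                O
    #   Bundesgerichtshof  B-GRT
    #   entschied          O
    #   .                  O
    #   <blank line>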
    def _generate_examples(self, datapath):
        sentence_counter = 0
        for filepath in self.config.filepaths:
            filepath = os.path.join(datapath, filepath)
            with open(filepath, encoding="utf-8") as f:
                current_words = []
                current_labels = []
                for row in f:
                    row = row.rstrip()
                    row_split = row.split()
                    if len(row_split) == 2:
                        # A "token label" pair: accumulate it into the current sentence.
                        token, label = row_split
                        current_words.append(token)
                        current_labels.append(label)
                    else:
                        # A blank (or malformed) line ends the current sentence.
                        if not current_words:
                            continue
                        assert len(current_words) == len(current_labels), "word count does not match label count"
                        sentence = (
                            sentence_counter,
                            {
                                "id": str(sentence_counter),
                                "tokens": current_words,
                                "ner_tags": current_labels,
                            },
                        )
                        sentence_counter += 1
                        current_words = []
                        current_labels = []
                        yield sentence

                # Flush a trailing sentence if the file does not end with a blank line.
                if current_words:
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_words,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1  # keep example keys unique across files
                    yield sentence
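The ner_tags column is stored as ClassLabel integers. A short sketch for recovering the string labels, assuming the dataset loads as shown above:

from datasets import load_dataset

ds = load_dataset("german_legal_entity_recognition", "all", split="train")

# The Sequence feature wraps a ClassLabel; int2str maps ids back to tag names.
tag_feature = ds.features["ner_tags"].feature
example = ds[0]
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(f"{token}\t{tag_feature.int2str(tag_id)}")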