HisGermaNER.py
import datasets
# Citation: coming soon!
_CITATION = ""

_DESCRIPTION = """\
HisGermaNER is another NER dataset from historical German newspapers.
For the first release of our dataset, 11 newspapers from 1710 to 1840 from the
Austrian National Library (ONB) were selected, resulting in 100 pages.
"""


class HisGermaNERConfig(datasets.BuilderConfig):
    """BuilderConfig for HisGermaNER."""

    def __init__(self, data_url, **kwargs):
        super().__init__(**kwargs)
        # Base URL prefix under which the train/dev/test split files are hosted.
        self.data_url = data_url


class HisGermaNER(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
HisGermaNERConfig(
name="HisGermaNER",
version=datasets.Version("0.0.1"),
description="HisGermaNER Dataset",
data_url="https://huggingface.co/datasets/stefan-it/HisGermaNER/resolve/main/splits/HisGermaNER_v0_",
)
]

    def _info(self):
        """Returns dataset metadata and the feature schema."""
        return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
)
}
),
supervised_keys=None,
homepage="https://huggingface.co/datasets/stefan-it/HisGermaNER",
citation=_CITATION,
)
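
    # Illustrative shape of one example under the schema above, as yielded by
    # _generate_examples below (tokens and tags are invented for illustration,
    # not taken from the corpus; the ClassLabel feature encodes the string
    # tags to integer ids at build time):
    #
    #   (0, {"id": "0",
    #        "tokens": ["Wien", ",", "den", "1.", "Jänner"],
    #        "ner_tags": ["B-LOC", "O", "O", "O", "O"]})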

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, dev and test splits."""
        # The splits are plain TSV files, so download_and_extract effectively
        # just downloads them and returns the local cache paths.
        download_urls = {
            split: self.config.data_url + split + ".tsv" for split in ["train", "dev", "test"]
        }
        downloaded_files = dl_manager.download_and_extract(download_urls)
splits = [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
]
return splits
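
    # Expected input format (reconstructed from the parsing logic below, so
    # treat the details as an assumption): one token per line with three
    # tab-separated columns (token, NER tag, misc), a header row starting
    # with "TOKEN", metadata lines starting with "# ", and blank lines
    # separating sentences.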
    def _generate_examples(self, filepath):
        """Parses a CoNLL-style TSV file and yields one example per sentence."""
        with open(filepath, "rt", encoding="utf-8") as f_p:
            current_tokens = []
            current_tags = []
            sentence_counter = 0
            for line in f_p:
                line = line.strip()
                # A blank line marks a sentence boundary: emit the sentence
                # collected so far and reset the buffers.
                if not line:
                    if len(current_tokens) > 0:
                        sentence = (
                            sentence_counter, {
                                "id": str(sentence_counter),
                                "tokens": current_tokens,
                                "ner_tags": current_tags,
                            }
                        )
                        sentence_counter += 1
                        current_tokens = []
                        current_tags = []
                        yield sentence
                    continue
                # Skip the column header row.
                if line.startswith("TOKEN"):
                    continue
                # Skip metadata/comment lines.
                if line.startswith("# "):
                    continue
                # Data lines carry three tab-separated columns; the misc
                # column is not used by this loader.
                token, tag, misc = line.split("\t")
                current_tokens.append(token)
                current_tags.append(tag)
            # Emit the trailing sentence if the file does not end with a
            # blank line.
            if len(current_tokens) > 0:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_tags,
                }
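

# A minimal usage sketch (an assumption, not part of the original loader):
# recent versions of the datasets library require trust_remote_code=True to
# execute a community loading script like this one.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("stefan-it/HisGermaNER", trust_remote_code=True)
#     print(dataset["train"][0]["tokens"])
#     label_names = dataset["train"].features["ner_tags"].feature.names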