Tasks: Token Classification
Modalities: Text
Sub-tasks: named-entity-recognition
Languages: Polish
Size: 10K - 100K
License:
# coding=utf-8
"""KPWR version 1.27 dataset."""
import csv

import datasets

_DESCRIPTION = "KPWR version 1.27 dataset."

_URLS = {
    "train": "https://huggingface.co/datasets/clarin-knext/kpwr/resolve/main/data/train.iob",
    "valid": "https://huggingface.co/datasets/clarin-knext/kpwr/resolve/main/data/valid.iob",
    "test": "https://huggingface.co/datasets/clarin-knext/kpwr/resolve/main/data/test.iob",
}

_HOMEPAGE = "https://clarin-pl.eu/dspace/handle/11321/270"

# Read the n82 entity types and expand them into the full IOB label set.
# strip() guards against a trailing newline producing an empty tag name.
with open('data/n82_tagset.txt', 'r', encoding='utf-8') as fin:
    _N82_TAGS = fin.read().strip().split('\n')

_NER_IOB_TAGS = ['O']
for tag in _N82_TAGS:
    _NER_IOB_TAGS.extend([f'B-{tag}', f'I-{tag}'])
class KpwrDataset(datasets.GeneratorBasedBuilder):
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value('string')),
                    "lemmas": datasets.Sequence(datasets.Value('string')),
                    "mstags": datasets.Sequence(datasets.Value('string')),
                    "ner": datasets.Sequence(datasets.features.ClassLabel(names=_NER_IOB_TAGS)),
                }
            ),
            homepage=_HOMEPAGE,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['valid']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files['test']}),
        ]
    def _generate_examples(self, filepath: str):
        with open(filepath, 'r', encoding='utf-8') as fin:
            reader = csv.reader(fin, delimiter='\t', quoting=csv.QUOTE_NONE)
            tokens = []
            lemmas = []
            mstags = []
            ner = []
            gid = 0
            for line in reader:
                if not line:
                    # A blank line ends the current sentence.
                    yield gid, {
                        "tokens": tokens,
                        "lemmas": lemmas,
                        "mstags": mstags,
                        "ner": ner,
                    }
                    gid += 1
                    tokens = []
                    lemmas = []
                    mstags = []
                    ner = []
                elif len(line) == 1:  # ignore --DOCSTART lines
                    continue
                else:
                    # Each data row is tab-separated: token, lemma, morphosyntactic tag, NER tag.
                    tokens.append(line[0])
                    lemmas.append(line[1])
                    mstags.append(line[2])
                    ner.append(line[3])
            # Emit the last sentence if the file does not end with a blank line.
            if tokens:
                yield gid, {
                    "tokens": tokens,
                    "lemmas": lemmas,
                    "mstags": mstags,
                    "ner": ner,
                }
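
For reference, a minimal usage sketch, assuming the script above is published on the Hugging Face Hub as clarin-knext/kpwr (as the download URLs suggest) and that a recent version of the datasets library is installed; trust_remote_code is needed by newer datasets releases to execute script-based loaders:

from datasets import load_dataset

# Download and parse all three splits via the loading script above
# (assumes the repository id clarin-knext/kpwr from the _URLS above).
dataset = load_dataset("clarin-knext/kpwr", trust_remote_code=True)

# Inspect the first training sentence and a few of the NER label names.
print(dataset["train"][0]["tokens"])
print(dataset["train"].features["ner"].feature.names[:5])

Because "ner" is declared as a Sequence of ClassLabel, each tag is stored as an integer index into the IOB label list built from n82_tagset.txt; the names attribute maps indices back to tag strings.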