"""NKJP-POS tagging dataset.""" |

import csv
from typing import Dict, Generator, List, Tuple

import datasets

_DESCRIPTION = """NKJP-POS tagging dataset.""" |

_URLS = {
    "train": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/train.tsv",
    "validation": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/valid.tsv",
    "test": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/test.tsv",
}

_HOMEPAGE = "http://clip.ipipan.waw.pl/NationalCorpusOfPolish"

_POS_TAGS = [ |
    'adj',
    'adja',
    'adjc',
    'adjp',
    'adv',
    'aglt',
    'bedzie',
    'brev',
    'burk',
    'comp',
    'conj',
    'depr',
    'fin',
    'ger',
    'imps',
    'impt',
    'inf',
    'interj',
    'interp',
    'num',
    'numcol',
    'pact',
    'pant',
    'pcon',
    'ppas',
    'ppron12',
    'ppron3',
    'praet',
    'pred',
    'prep',
    'qub',
    'siebie',
    'subst',
    'winien',
    'xxx',
] |


class NKJPPOS(datasets.GeneratorBasedBuilder):
    """Dataset builder for the NKJP-POS part-of-speech tagging dataset."""

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "morph": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_POS_TAGS,
                            num_classes=len(_POS_TAGS),
                        )
                    ),
                    "full_pos_tags": datasets.Sequence(datasets.Value("string")),
                    "nps": datasets.Sequence(datasets.Value("string")),
                }
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    @staticmethod
    def _parse_tag(tag: str) -> Tuple[str, str]:
        """Split a full morphosyntactic tag into a (coarse POS, full tag) pair,
        e.g. 'subst:sg:nom:f' -> ('subst', 'subst:sg:nom:f')."""
        full_tag = tag
        pos_tag = tag.split(':')[0]
        return pos_tag, full_tag

    def _generate_examples(
        self, filepath: str
    ) -> Generator[Tuple[int, Dict[str, List[str]]], None, None]:
        # Each row of the TSV describes a single token:
        #     token <TAB> morph <TAB> tag <TAB> lemma <TAB> nps
        # Sentences are separated by blank lines.
        with open(filepath, 'r', encoding="utf-8") as f:
            reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)

            tokens = []
            morph = []
            tags = []
            full_tags = []
            lemmas = []
            nps = []
            gid = 0

            for line in reader:
                if not line:
                    # A blank line ends the current sentence.
                    if tokens:
                        yield gid, {
                            'tokens': tokens,
                            'morph': morph,
                            'pos_tags': tags,
                            'full_pos_tags': full_tags,
                            'lemmas': lemmas,
                            'nps': nps,
                        }
                        gid += 1
                    tokens = []
                    morph = []
                    tags = []
                    full_tags = []
                    lemmas = []
                    nps = []
                else:
                    tokens.append(line[0])
                    morph.append(line[1])
                    lemmas.append(line[3])
                    nps.append(line[4])
                    tag, full_tag = self._parse_tag(line[2])
                    tags.append(tag)
                    full_tags.append(full_tag)

            # Emit the trailing sentence in case the file does not end with a blank line.
            if tokens:
                yield gid, {
                    'tokens': tokens,
                    'morph': morph,
                    'pos_tags': tags,
                    'full_pos_tags': full_tags,
                    'lemmas': lemmas,
                    'nps': nps,
                }
|
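
# Minimal usage sketch (illustrative only, not part of the builder; it assumes this
# script is published on the Hugging Face Hub as "clarin-pl/nkjp-pos", matching the
# _URLS above):
#
#     import datasets
#
#     nkjp = datasets.load_dataset("clarin-pl/nkjp-pos")
#     sentence = nkjp["train"][0]
#     print(sentence["tokens"])
#     print(sentence["pos_tags"])  # class indices into _POS_TAGS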