# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""NKJP-POS tagging dataset."""

import csv
from typing import Dict, Generator, List, Tuple

import datasets

_DESCRIPTION = """NKJP-POS tagging dataset."""

_URLS = {
    "train": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/train.tsv",
    "validation": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/valid.tsv",
    "test": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/test.tsv",
}

_HOMEPAGE = "http://clip.ipipan.waw.pl/NationalCorpusOfPolish"

# Coarse grammatical-class tags of the NKJP tagset.
_POS_TAGS = [
    'adj', 'adja', 'adjc', 'adjp', 'adv', 'aglt', 'bedzie', 'brev', 'burk',
    'comp', 'conj', 'depr', 'fin', 'ger', 'imps', 'impt', 'inf', 'interj',
    'interp', 'num', 'numcol', 'pact', 'pant', 'pcon', 'ppas', 'ppron12',
    'ppron3', 'praet', 'pred', 'prep', 'qub', 'siebie', 'subst', 'winien',
    'xxx',
]


class NKJPPOS(datasets.GeneratorBasedBuilder):
    """NKJP-POS: part-of-speech tagging data from the National Corpus of Polish."""

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "morph": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_POS_TAGS, num_classes=len(_POS_TAGS)
                        )
                    ),
                    "full_pos_tags": datasets.Sequence(datasets.Value("string")),
                    "nps": datasets.Sequence(datasets.Value("string")),
                }
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    @staticmethod
    def _parse_tag(tag: str) -> Tuple[str, str]:
        """Splits a full morphosyntactic tag (e.g. 'subst:sg:nom:m1') into its
        coarse grammatical class ('subst') and the unchanged full tag."""
        pos_tag = tag.split(':')[0]
        return pos_tag, tag

    def _generate_examples(
        self, filepath: str
    ) -> Generator[Tuple[int, Dict[str, List[str]]], None, None]:
        with open(filepath, 'r', encoding="utf-8") as f:
            reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            tokens, morph, tags, full_tags, lemma, nps = [], [], [], [], [], []
            gid = 0
            for line in reader:
                # Sentences are separated by blank lines in the TSV files.
                if not line:
                    if tokens:  # Guard against consecutive blank lines.
                        yield gid, {
                            'tokens': tokens,
                            'morph': morph,
                            'pos_tags': tags,
                            'full_pos_tags': full_tags,
                            'lemmas': lemma,
                            'nps': nps,
                        }
                        gid += 1
                    tokens, morph, tags, full_tags, lemma, nps = \
                        [], [], [], [], [], []
                else:
                    # Column layout: token, morphological analysis,
                    # morphosyntactic tag, lemma, nps.
                    tokens.append(line[0])
                    morph.append(line[1])
                    lemma.append(line[3])
                    nps.append(line[4])
                    tag, full_tag = self._parse_tag(line[2])
                    tags.append(tag)
                    full_tags.append(full_tag)
            # Emit the final sentence if the file does not end with a blank line;
            # without this, the last buffered example would be silently dropped.
            if tokens:
                yield gid, {
                    'tokens': tokens,
                    'morph': morph,
                    'pos_tags': tags,
                    'full_pos_tags': full_tags,
                    'lemmas': lemma,
                    'nps': nps,
                }
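

# ---------------------------------------------------------------------------
# Usage sketch (not part of the loading script itself). Assuming this script
# is published on the Hugging Face Hub under the repository id implied by the
# _URLS entries above ("clarin-pl/nkjp-pos"), the splits it defines can be
# loaded with the standard `datasets` API. `pos_tags` is a ClassLabel
# sequence, so integer ids can be mapped back to tag names via the feature:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("clarin-pl/nkjp-pos")
#     example = ds["train"][0]
#     names = ds["train"].features["pos_tags"].feature.names
#     print(example["tokens"])
#     print([names[i] for i in example["pos_tags"]])
# ---------------------------------------------------------------------------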