# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""NKJP-POS tagging dataset."""


import json
from typing import Dict, Generator, List, Tuple

import datasets

_DESCRIPTION = """NKJP-POS tagging dataset."""

_URLS = {
    "train": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/train.jsonl",
    "test": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/test.jsonl",
}

_HOMEPAGE = "http://clip.ipipan.waw.pl/NationalCorpusOfPolish"

# NKJP grammatical-class tags. Kept as a set for fast membership checks in
# `_clean_line`; sorted before building the ClassLabel so label ids stay stable.
_POS_TAGS = {
    "adj",
    "adja",
    "adjc",
    "adjp",
    "adv",
    "aglt",
    "bedzie",
    "brev",
    "burk",
    "comp",
    "conj",
    "depr",
    "fin",
    "ger",
    "imps",
    "impt",
    "inf",
    "interj",
    "interp",
    "num",
    "numcol",
    "pact",
    "pant",
    "pcon",
    "ppas",
    "ppron12",
    "ppron3",
    "praet",
    "pred",
    "prep",
    "qub",
    "siebie",
    "subst",
    "winien",
    "xxx",
}


class NKJPPOS(datasets.GeneratorBasedBuilder):
    """Part-of-speech tagging dataset from the National Corpus of Polish (NKJP)."""

    VERSION = datasets.Version("1.1.0")

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        # Sort the tag set so the name -> id mapping is
                        # deterministic; iterating a set directly would give
                        # a different order on each run due to string hash
                        # randomization.
                        datasets.features.ClassLabel(
                            names=sorted(_POS_TAGS), num_classes=len(_POS_TAGS)
                        )
                    ),
                }
            ),
            homepage=_HOMEPAGE,
            version=self.VERSION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    @staticmethod
    def _clean_line(data_line: Dict) -> Dict:
        """Drop token/tag pairs whose tag is not in the known NKJP tagset."""
        new_tokens = []
        new_pos_tags = []
        for token, pos_tag in zip(data_line["tokens"], data_line["pos_tags"]):
            if pos_tag in _POS_TAGS:
                new_tokens.append(token)
                new_pos_tags.append(pos_tag)
        data_line["tokens"] = new_tokens
        data_line["pos_tags"] = new_pos_tags
        assert len(data_line["tokens"]) == len(data_line["pos_tags"])
        return data_line

    def _generate_examples(
        self, filepath: str
    ) -> Generator[Tuple[str, Dict], None, None]:
        # Each line of the JSONL file is one example with "id", "tokens"
        # and "pos_tags" fields; the example id doubles as the key.
        with open(filepath, "r", encoding="utf-8") as f:
            for line in f:
                data_line = self._clean_line(json.loads(line))
                yield data_line["id"], data_line
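
# --- Usage sketch (illustrative, not part of the loading script) ---
# Assuming this script lives in the `clarin-pl/nkjp-pos` dataset repository on
# the Hugging Face Hub (as the _URLS above suggest), the dataset can be loaded
# with `datasets.load_dataset`. A minimal sketch:
#
#     import datasets
#
#     ds = datasets.load_dataset("clarin-pl/nkjp-pos")
#     example = ds["train"][0]
#     print(example["tokens"])
#
#     # `pos_tags` are stored as ClassLabel ids; map them back to tag names
#     # via the feature metadata:
#     tag_names = ds["train"].features["pos_tags"].feature.names
#     print([tag_names[i] for i in example["pos_tags"]])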