# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import datasets


_CITATION = ''

_DESCRIPTION = """SETimes_sr is a Serbian dataset annotated for morphosyntactic information and named entities.

The dataset contains 3177 training samples, 395 validation samples and 319 test samples
across the respective data splits. Each sample represents a sentence and includes the following
features: sentence ID ('sent_id'), sentence text ('text'), list of tokens ('tokens'), list of
lemmas ('lemmas'), list of MULTEXT-East tags ('xpos_tags'), list of UPOS tags ('upos_tags'),
list of morphological features ('feats'), and list of IOB tags ('iob_tags'). The 'upos_tags'
and 'iob_tags' features are encoded as class labels.
"""

_HOMEPAGE = ''

_LICENSE = ''

_URL = 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data.zip'
_TRAINING_FILE = 'train_ner.conllu'
_DEV_FILE = 'dev_ner.conllu'
_TEST_FILE = 'test_ner.conllu'


class SeTimesSr(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version('1.0.0')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='setimes_sr',
            version=VERSION,
            description=''
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                'sent_id': datasets.Value('string'),
                'text': datasets.Value('string'),
                'tokens': datasets.Sequence(datasets.Value('string')),
                'lemmas': datasets.Sequence(datasets.Value('string')),
                'xpos_tags': datasets.Sequence(datasets.Value('string')),
                'upos_tags': datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            'X', 'INTJ', 'VERB', 'PROPN', 'ADV', 'ADJ', 'PUNCT',
                            'PRON', 'DET', 'NUM', 'SYM', 'SCONJ', 'NOUN', 'AUX',
                            'PART', 'CCONJ', 'ADP'
                        ]
                    )
                ),
                'feats': datasets.Sequence(datasets.Value('string')),
                'iob_tags': datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            'I-org', 'B-misc', 'B-per', 'B-deriv-per', 'B-org',
                            'B-loc', 'I-deriv-per', 'I-misc', 'I-loc', 'I-per', 'O'
                        ]
                    )
                )
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepath': os.path.join(data_dir, _TRAINING_FILE), 'split': 'train'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepath': os.path.join(data_dir, _DEV_FILE), 'split': 'dev'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'filepath': os.path.join(data_dir, _TEST_FILE), 'split': 'test'}
            ),
        ]
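
    # The *_ner.conllu data files are plain-text, CoNLL-U-style files: each
    # sentence is a block of tab-separated token lines, preceded by
    # '# sent_id = ...' and '# text = ...' comment lines and separated from
    # the next block by a blank line. A sketch of the assumed layout,
    # inferred from the column indices used in _generate_examples below
    # (the values shown are illustrative, not taken from the data files):
    #
    #   # sent_id = train-s1
    #   # text = ...
    #   1<TAB>token<TAB>lemma<TAB>xpos<TAB>upos<TAB>feats<TAB>...<TAB>ner
    #
    # Fields 1-5 supply 'tokens', 'lemmas', 'xpos_tags', 'upos_tags' and
    # 'feats'; field 9 (the last column) supplies the 'iob_tags' label.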
    def _generate_examples(self, filepath, split):
        with open(filepath, encoding='utf-8') as f:
            sent_id = ''
            text = ''
            tokens = []
            lemmas = []
            xpos_tags = []
            upos_tags = []
            feats = []
            iob_tags = []
            data_id = 0
            for line in f:
                if line and not line == '\n':
                    if line.startswith('#'):
                        if line.startswith('# sent_id'):
                            # A new sentence begins: flush the one collected so far.
                            if tokens:
                                yield data_id, {
                                    'sent_id': sent_id,
                                    'text': text,
                                    'tokens': tokens,
                                    'lemmas': lemmas,
                                    'xpos_tags': xpos_tags,
                                    'upos_tags': upos_tags,
                                    'feats': feats,
                                    'iob_tags': iob_tags
                                }
                                tokens = []
                                lemmas = []
                                xpos_tags = []
                                upos_tags = []
                                feats = []
                                iob_tags = []
                                data_id += 1
                            sent_id = line.split(' = ')[1].strip()
                        elif line.startswith('# text'):
                            text = line.split(' = ')[1].strip()
                    elif not line.startswith('_'):
                        splits = line.split('\t')
                        tokens.append(splits[1].strip())
                        lemmas.append(splits[2].strip())
                        xpos_tags.append(splits[3].strip())
                        upos_tags.append(splits[4].strip())
                        feats.append(splits[5].strip())
                        iob_tags.append(splits[9].strip())

            # Yield the last sentence in the file, which is not followed by
            # another '# sent_id' line.
            yield data_id, {
                'sent_id': sent_id,
                'text': text,
                'tokens': tokens,
                'lemmas': lemmas,
                'xpos_tags': xpos_tags,
                'upos_tags': upos_tags,
                'feats': feats,
                'iob_tags': iob_tags
            }
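

# A minimal usage sketch, assuming this script is published on the Hugging
# Face Hub as 'classla/setimes_sr' (not part of the loading script itself):
# running the file directly loads the dataset and decodes the
# ClassLabel-encoded features back to their string names.
if __name__ == '__main__':
    dataset = datasets.load_dataset('classla/setimes_sr')
    sample = dataset['train'][0]
    print(sample['sent_id'], sample['text'])
    # ClassLabel features are stored as integer ids; int2str() recovers the names.
    upos = dataset['train'].features['upos_tags'].feature
    iob = dataset['train'].features['iob_tags'].feature
    print([upos.int2str(i) for i in sample['upos_tags']])
    print([iob.int2str(i) for i in sample['iob_tags']])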