# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import datasets


_CITATION = ''

_DESCRIPTION = """SETimes_sr is a Serbian dataset annotated for morphosyntactic information and named entities.

The dataset contains 3177 training samples, 395 validation samples and 319 test samples across
the respective data splits. Each sample represents a sentence and includes the following features:
sentence ID ('sent_id'), sentence text ('text'), list of tokens ('tokens'), list of lemmas
('lemmas'), list of MULTEXT-East tags ('xpos_tags'), list of UPOS tags ('upos_tags'), list of
morphological features ('feats'), and list of IOB tags ('iob_tags'). The 'upos_tags' and
'iob_tags' features are encoded as class labels.
"""

_HOMEPAGE = ''

_LICENSE = ''

_URLs = {
    'ner': 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data_ner.zip',
    'upos': 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data_ner.zip',
    'ud': 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data_ud.zip'
}

_DATA_DIRS = {
    'ner': 'data_ner',
    'upos': 'data_ner',
    'ud': 'data_ud'
}


class SeTimesSr(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version('1.0.1')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='upos',
            version=VERSION,
            description='UPOS tags encoded as class labels.'
        ),
        datasets.BuilderConfig(
            name='ner',
            version=VERSION,
            description='Named-entity IOB tags encoded as class labels.'
        ),
        datasets.BuilderConfig(
            name='ud',
            version=VERSION,
            description='UD dependency relations encoded as class labels.'
        )
    ]

    DEFAULT_CONFIG_NAME = 'ner'

    def _info(self):
        # All configs expose the same base features; the feature each config
        # targets ('upos_tags', 'iob_tags' or 'uds') is encoded as a
        # ClassLabel sequence, the rest as plain strings.
        if self.config.name == 'upos':
            features = datasets.Features(
                {
                    'sent_id': datasets.Value('string'),
                    'text': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'lemmas': datasets.Sequence(datasets.Value('string')),
                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
                    'upos_tags': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'X', 'INTJ', 'VERB', 'PROPN', 'ADV', 'ADJ',
                                'PUNCT', 'PRON', 'DET', 'NUM', 'SYM', 'SCONJ',
                                'NOUN', 'AUX', 'PART', 'CCONJ', 'ADP'
                            ]
                        )
                    ),
                    'feats': datasets.Sequence(datasets.Value('string')),
                    'iob_tags': datasets.Sequence(datasets.Value('string'))
                }
            )
        elif self.config.name == 'ner':
            features = datasets.Features(
                {
                    'sent_id': datasets.Value('string'),
                    'text': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'lemmas': datasets.Sequence(datasets.Value('string')),
                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
                    'upos_tags': datasets.Sequence(datasets.Value('string')),
                    'feats': datasets.Sequence(datasets.Value('string')),
                    'iob_tags': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'I-org', 'B-misc', 'B-per', 'B-deriv-per',
                                'B-org', 'B-loc', 'I-deriv-per', 'I-misc',
                                'I-loc', 'I-per', 'O'
                            ]
                        )
                    )
                }
            )
        else:
            features = datasets.Features(
                {
                    'sent_id': datasets.Value('string'),
                    'text': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'lemmas': datasets.Sequence(datasets.Value('string')),
                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
                    'upos_tags': datasets.Sequence(datasets.Value('string')),
                    'feats': datasets.Sequence(datasets.Value('string')),
                    'iob_tags': datasets.Sequence(datasets.Value('string')),
                    'uds': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'punct', 'advmod', 'conj', 'aux', 'iobj', 'acl',
                                'fixed', 'vocative', 'root', 'nsubj', 'goeswith',
                                'cop', 'det', 'discourse', 'det_numgov', 'dep',
                                'ccomp', 'flat', 'compound', 'orphan', 'list',
                                'advcl', 'csubj', 'nummod_gov', 'case', 'obl',
                                'parataxis', 'amod', 'obj', 'cc', 'nmod',
                                'xcomp', 'appos', 'nummod', 'mark'
                            ]
                        )
                    )
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
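    # Each archive unpacks into a single directory holding one file per split:
    # CoNLL-U files ('.conllu') for the 'ner' and 'upos' configs, and
    # CoNLL-U Plus files ('.conllup') for the 'ud' config.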
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = os.path.join(
            dl_manager.download_and_extract(_URLs[self.config.name]),
            _DATA_DIRS[self.config.name]
        )

        if self.config.name == 'ud':
            training_file = 'train_ner_ud.conllup'
            dev_file = 'dev_ner_ud.conllup'
            test_file = 'test_ner_ud.conllup'
        else:
            training_file = 'train_ner.conllu'
            dev_file = 'dev_ner.conllu'
            test_file = 'test_ner.conllu'

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'filepath': os.path.join(data_dir, training_file),
                    'split': 'train'
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    'filepath': os.path.join(data_dir, dev_file),
                    'split': 'dev'
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    'filepath': os.path.join(data_dir, test_file),
                    'split': 'test'
                }
            ),
        ]

    def _generate_examples(self, filepath, split):
        if self.config.name == 'ud':
            with open(filepath, encoding='utf-8') as f:
                sent_id = ''
                text = ''
                tokens = []
                lemmas = []
                xpos_tags = []
                upos_tags = []
                feats = []
                iob_tags = []
                uds = []
                data_id = 0
                for line in f:
                    if line and not line == '\n' and not line.startswith('# global.columns'):
                        if line.startswith('#'):
                            if line.startswith('# sent_id'):
                                # A new sentence starts: emit the buffered one first.
                                if tokens:
                                    yield data_id, {
                                        'sent_id': sent_id,
                                        'text': text,
                                        'tokens': tokens,
                                        'lemmas': lemmas,
                                        'upos_tags': upos_tags,
                                        'xpos_tags': xpos_tags,
                                        'feats': feats,
                                        'iob_tags': iob_tags,
                                        'uds': uds
                                    }
                                    tokens = []
                                    lemmas = []
                                    upos_tags = []
                                    xpos_tags = []
                                    feats = []
                                    iob_tags = []
                                    uds = []
                                    data_id += 1
                                sent_id = line.split(' = ')[1].strip()
                            elif line.startswith('# text'):
                                text = line.split(' = ')[1].strip()
                        elif not line.startswith('_'):
                            # Token line: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL ...
                            # Note that 'iob_tags' is not populated for this
                            # config and stays an empty list.
                            splits = line.split('\t')
                            tokens.append(splits[1].strip())
                            lemmas.append(splits[2].strip())
                            upos_tags.append(splits[3].strip())
                            xpos_tags.append(splits[4].strip())
                            feats.append(splits[5].strip())
                            uds.append(splits[7].strip())

                # Emit the last buffered sentence in the file.
                yield data_id, {
                    'sent_id': sent_id,
                    'text': text,
                    'tokens': tokens,
                    'lemmas': lemmas,
                    'upos_tags': upos_tags,
                    'xpos_tags': xpos_tags,
                    'feats': feats,
                    'iob_tags': iob_tags,
                    'uds': uds
                }
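        # The 'ner' and 'upos' configs share the same CoNLL-U files; the NER
        # label is read from the last column (index 9) of each token line.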
        else:
            with open(filepath, encoding='utf-8') as f:
                sent_id = ''
                text = ''
                tokens = []
                lemmas = []
                xpos_tags = []
                upos_tags = []
                feats = []
                iob_tags = []
                data_id = 0
                for line in f:
                    if line and not line == '\n':
                        if line.startswith('#'):
                            if line.startswith('# sent_id'):
                                # A new sentence starts: emit the buffered one first.
                                if tokens:
                                    yield data_id, {
                                        'sent_id': sent_id,
                                        'text': text,
                                        'tokens': tokens,
                                        'lemmas': lemmas,
                                        'upos_tags': upos_tags,
                                        'xpos_tags': xpos_tags,
                                        'feats': feats,
                                        'iob_tags': iob_tags
                                    }
                                    tokens = []
                                    lemmas = []
                                    upos_tags = []
                                    xpos_tags = []
                                    feats = []
                                    iob_tags = []
                                    data_id += 1
                                sent_id = line.split(' = ')[1].strip()
                            elif line.startswith('# text'):
                                text = line.split(' = ')[1].strip()
                        elif not line.startswith('_'):
                            splits = line.split('\t')
                            tokens.append(splits[1].strip())
                            lemmas.append(splits[2].strip())
                            upos_tags.append(splits[3].strip())
                            xpos_tags.append(splits[4].strip())
                            feats.append(splits[5].strip())
                            iob_tags.append(splits[9].strip())

                # Emit the last buffered sentence in the file.
                yield data_id, {
                    'sent_id': sent_id,
                    'text': text,
                    'tokens': tokens,
                    'lemmas': lemmas,
                    'upos_tags': upos_tags,
                    'xpos_tags': xpos_tags,
                    'feats': feats,
                    'iob_tags': iob_tags
                }
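
# Example usage (a sketch, assuming the `datasets` library is installed and
# this script is resolved from the `classla/setimes_sr` repository on the
# Hugging Face Hub, as the URLs above suggest):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset('classla/setimes_sr', 'ner')
#     print(dataset['train'][0]['tokens'])
#     print(dataset['train'][0]['iob_tags'])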