# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import datasets

_CITATION = ''

_DESCRIPTION = """The dataset contains 6339 training samples, 815 validation samples and 785 test samples.
Each sample represents a sentence and includes the following features: sentence ID ('sent_id'),
list of tokens ('tokens'), list of normalised word forms ('norms'), list of lemmas ('lemmas'),
list of UPOS tags ('upos_tags'), list of MULTEXT-East tags ('xpos_tags'), list of morphological
features ('feats'), and list of IOB tags ('iob_tags'), which are encoded as class labels.
"""

_HOMEPAGE = ''

_LICENSE = ''

_URL = 'https://huggingface.co/datasets/classla/reldi_hr/raw/main/data.zip'
_TRAINING_FILE = 'train_all.conllup'
_DEV_FILE = 'dev_all.conllup'
_TEST_FILE = 'test_all.conllup'
_DATA_DIR = 'data'


class ReldiHr(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version('1.0.1')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name='reldi_hr', version=VERSION, description='')
    ]

    def _info(self):
        features = datasets.Features(
            {
                'sent_id': datasets.Value('string'),
                'tokens': datasets.Sequence(datasets.Value('string')),
                'norms': datasets.Sequence(datasets.Value('string')),
                'lemmas': datasets.Sequence(datasets.Value('string')),
                'upos_tags': datasets.Sequence(datasets.Value('string')),
                'xpos_tags': datasets.Sequence(datasets.Value('string')),
                'feats': datasets.Sequence(datasets.Value('string')),
                'iob_tags': datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            'I-org', 'B-misc', 'B-per', 'B-deriv-per', 'B-org', 'B-loc',
                            'I-deriv-per', 'I-misc', 'I-loc', 'I-per', 'O'
                        ]
                    )
                )
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), _DATA_DIR)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepath': os.path.join(data_dir, _TRAINING_FILE), 'split': 'train'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepath': os.path.join(data_dir, _DEV_FILE), 'split': 'dev'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'filepath': os.path.join(data_dir, _TEST_FILE), 'split': 'test'}
            ),
        ]

    def _generate_examples(self, filepath, split):
        with open(filepath, encoding='utf-8') as f:
            sent_id = ''
            tokens = []
            norms = []
            lemmas = []
            upos_tags = []
            xpos_tags = []
            feats = []
            iob_tags = []
            data_id = 0
            for line in f:
                # Skip blank lines and the CoNLL-UP column-declaration header.
                if line and line != '\n' and not line.startswith('# global.columns'):
                    if line.startswith('# sent_id'):
                        # A new sentence begins: emit the one collected so far, if any.
                        if tokens:
                            yield data_id, {
                                'sent_id': sent_id,
                                'tokens': tokens,
                                'norms': norms,
                                'lemmas': lemmas,
                                'upos_tags': upos_tags,
                                'xpos_tags': xpos_tags,
                                'feats': feats,
                                'iob_tags': iob_tags
                            }
                            tokens = []
                            norms = []
                            lemmas = []
                            upos_tags = []
                            xpos_tags = []
                            feats = []
                            iob_tags = []
                            data_id += 1
                        sent_id = line.split(' = ')[1].strip()
                    else:
                        # Token line; tab-separated columns are:
                        # index, form, norm, lemma, UPOS, XPOS, feats, NE tag.
                        splits = line.split('\t')
                        tokens.append(splits[1].strip())
                        norms.append(splits[2].strip())
                        lemmas.append(splits[3].strip())
                        upos_tags.append(splits[4].strip())
                        xpos_tags.append(splits[5].strip())
                        feats.append(splits[6].strip())
                        iob_tags.append(splits[7].strip())

            # Emit the final sentence in the file.
            yield data_id, {
                'sent_id': sent_id,
                'tokens': tokens,
                'norms': norms,
                'lemmas': lemmas,
                'upos_tags': upos_tags,
                'xpos_tags': xpos_tags,
                'feats': feats,
                'iob_tags': iob_tags
            }
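
# Example usage (a minimal sketch; assumes this script is published on the Hub
# under 'classla/reldi_hr', matching the _URL above, so `load_dataset` can
# resolve it by name):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset('classla/reldi_hr')
#     sample = dataset['train'][0]
#     print(sample['tokens'], sample['iob_tags'])
#
# Note that 'iob_tags' are returned as integer class labels; convert them back
# to strings with dataset['train'].features['iob_tags'].feature.int2str(...).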