# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import datasets


_CITATION = ''

_DESCRIPTION = """The dataset contains 7432 training samples, 1164 validation samples and 893 test samples.
Each sample represents a sentence and includes the following features: sentence ID ('sent_id'), raw text ('text'),
list of tokens ('tokens'), list of lemmas ('lemmas'),
list of MULTEXT-East tags ('xpos_tags'), list of UPOS tags ('upos_tags'), list of morphological features ('feats'),
list of IOB tags ('iob_tags'), and, in the 'ud' configuration only, list of Universal Dependencies relations ('uds').
Three dataset configurations are available, where the corresponding feature is encoded as class labels: 'ner', 'upos', and 'ud'.
"""
_HOMEPAGE = ''
_LICENSE = ''
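
# Archive URLs per configuration. 'upos' deliberately reuses the 'ner' archive:
# both configurations read the same CoNLL-U files and differ only in which
# column _info() encodes as class labels.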
_URLs = {
'ner': 'https://huggingface.co/datasets/classla/ssj500k/raw/main/data_ner.zip',
'upos': 'https://huggingface.co/datasets/classla/ssj500k/raw/main/data_ner.zip',
'ud': 'https://huggingface.co/datasets/classla/ssj500k/raw/main/data_ud.zip'
}
_DATA_DIRS = {
'ner': 'data_ner',
'upos': 'data_ner',
'ud': 'data_ud'
}


class Ssj500K(datasets.GeneratorBasedBuilder):
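    """ssj500k dataset builder with three configurations: 'ner', 'upos', and 'ud'."""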
VERSION = datasets.Version('1.0.0')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='upos',
            version=VERSION,
            description='UPOS tags encoded as class labels'
        ),
        datasets.BuilderConfig(
            name='ner',
            version=VERSION,
            description='IOB NER tags encoded as class labels'
        ),
        datasets.BuilderConfig(
            name='ud',
            version=VERSION,
            description='Universal Dependencies relations encoded as class labels'
        )
    ]
DEFAULT_CONFIG_NAME = 'ner'

    def _info(self):
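        """Builds the feature schema; only the target column of the active configuration is a ClassLabel."""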
if self.config.name == "upos":
features = datasets.Features(
{
'sent_id': datasets.Value('string'),
'text': datasets.Value('string'),
'tokens': datasets.Sequence(datasets.Value('string')),
'lemmas': datasets.Sequence(datasets.Value('string')),
'xpos_tags': datasets.Sequence(datasets.Value('string')),
'upos_tags': datasets.Sequence(
datasets.features.ClassLabel(
names=[
'SCONJ',
'ADP',
'ADV',
'NUM',
'ADJ',
'PRON',
'DET',
'X',
'PART',
'NOUN',
'CCONJ',
'PROPN',
'PUNCT',
'AUX',
'VERB',
'INTJ'
]
)
),
'feats': datasets.Sequence(datasets.Value('string')),
'iob_tags': datasets.Sequence(datasets.Value('string'))
}
)
elif self.config.name == "ner":
features = datasets.Features(
{
'sent_id': datasets.Value('string'),
'text': datasets.Value('string'),
'tokens': datasets.Sequence(datasets.Value('string')),
'lemmas': datasets.Sequence(datasets.Value('string')),
'xpos_tags': datasets.Sequence(datasets.Value('string')),
'upos_tags': datasets.Sequence(datasets.Value('string')),
'feats': datasets.Sequence(datasets.Value('string')),
'iob_tags': datasets.Sequence(
datasets.features.ClassLabel(
names=[
'I-per',
'O',
'I-org',
'B-loc',
'B-deriv-per',
'I-loc',
'I-deriv-per',
'B-org',
'B-per',
'B-misc',
'I-misc'
]
)
)
}
)
else:
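            # 'ud' configuration: dependency relations are the class-labelled column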
features = datasets.Features(
{
'sent_id': datasets.Value('string'),
'text': datasets.Value('string'),
'tokens': datasets.Sequence(datasets.Value('string')),
'lemmas': datasets.Sequence(datasets.Value('string')),
'xpos_tags': datasets.Sequence(datasets.Value('string')),
'upos_tags': datasets.Sequence(datasets.Value('string')),
'feats': datasets.Sequence(datasets.Value('string')),
'iob_tags': datasets.Sequence(datasets.Value('string')),
'uds': datasets.Sequence(
datasets.features.ClassLabel(
names=[
'nsubj', 'root', 'csubj', 'flat', 'aux', 'fixed', 'ccomp', 'discourse', 'nmod', 'amod',
'obj', 'nummod', 'iobj', 'mark', 'advmod', 'xcomp', 'acl', 'obl', 'flat_foreign', 'det',
'cop', 'cc', 'advcl', 'expl', 'flat_name', 'appos', 'cc_preconj', 'parataxis', 'conj',
'punct', 'case', 'dep'
]
)
)
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = os.path.join(dl_manager.download_and_extract(_URLs[self.config.name]), _DATA_DIRS[self.config.name])
if self.config.name == 'ud':
training_file = 'train_ner_ud.conllup'
dev_file = 'dev_ner_ud.conllup'
test_file = 'test_ner_ud.conllup'
else:
training_file = 'train_ner.conllu'
dev_file = 'dev_ner.conllu'
test_file = 'test_ner.conllu'
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={
'filepath': os.path.join(data_dir, training_file),
'split': 'train'}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={
'filepath': os.path.join(data_dir, dev_file),
'split': 'dev'}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={
'filepath': os.path.join(data_dir, test_file),
'split': 'test'}
),
]

    def _generate_examples(self, filepath, split):
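        """Parses the CoNLL-U / CoNLL-UP file sentence by sentence; 'split' is accepted but unused."""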
if self.config.name == 'ud':
with open(filepath, encoding='utf-8') as f:
sent_id = ''
text = ''
tokens = []
lemmas = []
xpos_tags = []
upos_tags = []
feats = []
iob_tags = []
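                # Note: iob_tags is part of the 'ud' schema but is never populated below,
                # so every example in this configuration carries an empty list.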
uds = []
data_id = 0
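                # Buffer one sentence at a time: a new '# sent_id' comment flushes the
                # previous sentence. The CoNLL-UP '# global.columns' header line is skipped.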
for line in f:
if line and not line == '\n' and not line.startswith('# global.columns'):
if line.startswith('#'):
if line.startswith('# sent_id'):
if tokens:
yield data_id, {
'sent_id': sent_id,
'text': text,
'tokens': tokens,
'lemmas': lemmas,
'upos_tags': upos_tags,
'xpos_tags': xpos_tags,
'feats': feats,
'iob_tags': iob_tags,
'uds': uds
}
tokens = []
lemmas = []
upos_tags = []
xpos_tags = []
feats = []
iob_tags = []
uds = []
data_id += 1
                                # maxsplit=1 keeps values that themselves contain ' = ' intact
                                sent_id = line.split(' = ', 1)[1].strip()
                            elif line.startswith('# text'):
                                text = line.split(' = ', 1)[1].strip()
elif not line.startswith('_'):
splits = line.split('\t')
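                            # CoNLL-U columns: ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, ...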
tokens.append(splits[1].strip())
lemmas.append(splits[2].strip())
upos_tags.append(splits[3].strip())
xpos_tags.append(splits[4].strip())
feats.append(splits[5].strip())
uds.append(splits[7].strip())
                # Flush the final buffered sentence (guarded so an empty file yields nothing).
                if tokens:
                    yield data_id, {
                        'sent_id': sent_id,
                        'text': text,
                        'tokens': tokens,
                        'lemmas': lemmas,
                        'upos_tags': upos_tags,
                        'xpos_tags': xpos_tags,
                        'feats': feats,
                        'iob_tags': iob_tags,
                        'uds': uds
                    }
else:
with open(filepath, encoding='utf-8') as f:
sent_id = ''
text = ''
tokens = []
lemmas = []
xpos_tags = []
upos_tags = []
feats = []
iob_tags = []
data_id = 0
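                # Same sentence-buffering scheme as the 'ud' branch, but the IOB NER
                # tag is read instead of the dependency relation.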
for line in f:
if line and not line == '\n':
if line.startswith('#'):
if line.startswith('# sent_id'):
if tokens:
yield data_id, {
'sent_id': sent_id,
'text': text,
'tokens': tokens,
'lemmas': lemmas,
'upos_tags': upos_tags,
'xpos_tags': xpos_tags,
'feats': feats,
'iob_tags': iob_tags
}
tokens = []
lemmas = []
upos_tags = []
xpos_tags = []
feats = []
iob_tags = []
data_id += 1
                                sent_id = line.split(' = ', 1)[1].strip()
                            elif line.startswith('# text'):
                                # maxsplit=1 keeps sentence text containing ' = ' intact
                                text = line.split(' = ', 1)[1].strip()
elif not line.startswith('_'):
splits = line.split('\t')
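                            # The IOB NER tag sits in the 10th column (the MISC slot of plain CoNLL-U).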
tokens.append(splits[1].strip())
lemmas.append(splits[2].strip())
upos_tags.append(splits[3].strip())
xpos_tags.append(splits[4].strip())
feats.append(splits[5].strip())
iob_tags.append(splits[9].strip())
                # Flush the final buffered sentence (guarded so an empty file yields nothing).
                if tokens:
                    yield data_id, {
                        'sent_id': sent_id,
                        'text': text,
                        'tokens': tokens,
                        'lemmas': lemmas,
                        'upos_tags': upos_tags,
                        'xpos_tags': xpos_tags,
                        'feats': feats,
                        'iob_tags': iob_tags
                    }