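"""HuggingFace `datasets` loading script for NLPrePL.

NLPrePL is distributed in two split schemes (fair by document name and fair
by document type), with two tagsets (NKJP and UD), and in three file formats
(conllu, conll, and conll with SpaceAfter information).
"""
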
import conllu
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = ""
BY_NAME = "by_name"
BY_TYPE = "by_type"
TAGSET_NKJP = "nkjp"
TAGSET_UD = "ud"
EXTENSION_CONLL = "conll"
EXTENSION_CONLLU = "conllu"
EXTENSION_CONLL_SPACE_AFTER = "conll_space_after"
_EXTENSIONS = [EXTENSION_CONLL, EXTENSION_CONLLU, EXTENSION_CONLL_SPACE_AFTER]
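
# Three serializations are distributed, presumably mirroring the dataset
# repository layout: standard 10-column CoNLL-U files, plain CoNLL files,
# and CoNLL files that additionally carry SpaceAfter/multiword-token
# information (see the "multiword_space_after_*" archives in _URLS below).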
_DESCRIPTION = {
BY_NAME: {
TAGSET_NKJP: "NLPrePL divided by document name for NKJP tagset",
TAGSET_UD: "NLPrePL divided by document name for UD tagset"
},
BY_TYPE: {
TAGSET_NKJP: "NLPrePL divided by document type for NKJP tagset",
TAGSET_UD: "NLPrePL divided by document type for UD tagset"
}
}
_TYPES = [BY_NAME, BY_TYPE]
_TAGSETS = [TAGSET_NKJP, TAGSET_UD]
_URLS = {
BY_NAME: {
EXTENSION_CONLLU: {
TAGSET_NKJP: {
'train': "nkjp_tagset/fair_by_document_name/_conllu/train_nlprepl-nkjp.conllu.gz",
'dev': "nkjp_tagset/fair_by_document_name/_conllu/dev_nlprepl-nkjp.conllu.gz",
'test': "nkjp_tagset/fair_by_document_name/_conllu/test_nlprepl-nkjp.conllu.gz"
},
TAGSET_UD: {
'train': "ud_tagset/fair_by_document_name/_conllu/train_nlprepl-ud.conllu.gz",
'dev': "ud_tagset/fair_by_document_name/_conllu/dev_nlprepl-ud.conllu.gz",
'test': "ud_tagset/fair_by_document_name/_conllu/test_nlprepl-ud.conllu.gz"
}
},
EXTENSION_CONLL: {
TAGSET_NKJP: {
'train': "nkjp_tagset/fair_by_document_name/_conll/train_nlprepl-nkjp.conll.gz",
'dev': "nkjp_tagset/fair_by_document_name/_conll/dev_nlprepl-nkjp.conll.gz",
'test': "nkjp_tagset/fair_by_document_name/_conll/test_nlprepl-nkjp.conll.gz"
}
},
EXTENSION_CONLL_SPACE_AFTER: {
TAGSET_NKJP: {
'train': "nkjp_tagset/fair_by_document_name/_conll_space_after/multiword_space_after_train_nlprepl-nkjp.conll.gz",
'dev': "nkjp_tagset/fair_by_document_name/_conll_space_after/multiword_space_after_dev_nlprepl-nkjp.conll.gz",
'test': "nkjp_tagset/fair_by_document_name/_conll_space_after/multiword_space_after_test_nlprepl-nkjp.conll.gz"
}
},
},
BY_TYPE: {
EXTENSION_CONLLU: {
TAGSET_NKJP: {
'train': "nkjp_tagset/fair_by_document_type/_conllu/train_nlprepl-nkjp.conllu.gz",
'dev': "nkjp_tagset/fair_by_document_type/_conllu/dev_nlprepl-nkjp.conllu.gz",
'test': "nkjp_tagset/fair_by_document_type/_conllu/test_nlprepl-nkjp.conllu.gz"
},
TAGSET_UD: {
'train': "ud_tagset/fair_by_document_type/_conllu/train_nlprepl-ud.conllu.gz",
'dev': "ud_tagset/fair_by_document_type/_conllu/dev_nlprepl-ud.conllu.gz",
'test': "ud_tagset/fair_by_document_type/_conllu/test_nlprepl-ud.conllu.gz"
}
},
EXTENSION_CONLL: {
TAGSET_NKJP: {
'train': "nkjp_tagset/fair_by_document_type/_conll/train_nlprepl-nkjp.conll.gz",
'dev': "nkjp_tagset/fair_by_document_type/_conll/dev_nlprepl-nkjp.conll.gz",
'test': "nkjp_tagset/fair_by_document_type/_conll/test_nlprepl-nkjp.conll.gz"
}
},
EXTENSION_CONLL_SPACE_AFTER: {
TAGSET_NKJP: {
'train': "nkjp_tagset/fair_by_document_type/_conllu_space_after/multiword_space_after_train_nlprepl-nkjp.conll.gz",
'dev': "nkjp_tagset/fair_by_document_type/_conllu_space_after/multiword_space_after_dev_nlprepl-nkjp.conll.gz",
'test': "nkjp_tagset/fair_by_document_type/_conllu_space_after/multiword_space_after_test_nlprepl-nkjp.conll.gz"
}
},
}
}
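
# The paths above are relative; dl_manager.download_and_extract() resolves
# them against the dataset repository and transparently decompresses the
# .gz archives, so _generate_examples() receives plain-text file paths.
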
class NLPrePLConfig(datasets.BuilderConfig):
    """BuilderConfig for NLPrePL."""

    def __init__(self, tagset: str, extension: str, **kwargs):
        """BuilderConfig for NLPrePL.

        Args:
            tagset: one of _TAGSETS ("nkjp" or "ud").
            extension: one of _EXTENSIONS ("conll", "conllu" or "conll_space_after").
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.tagset = tagset
        self.extension = extension
class NLPrePL(datasets.GeneratorBasedBuilder):
"""NLPrePL dataset generator."""
    BUILDER_CONFIGS = [
        NLPrePLConfig(
            name=f"{t}-{tagset}-{extension}",
            version=datasets.Version("1.0.0"),
            tagset=tagset,
            extension=extension,
            # _DESCRIPTION is nested by split type and tagset, so both keys are
            # needed to get a string (indexing by type alone returns a dict).
            description=_DESCRIPTION[t][tagset],
        )
        for t in _URLS
        for extension in _URLS[t]
        for tagset in _URLS[t][extension]
    ]
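
    # The comprehension above yields one config per (split type, format,
    # tagset) leaf in _URLS, e.g. "by_name-nkjp-conllu", "by_name-ud-conllu",
    # "by_name-nkjp-conll_space_after" or "by_type-nkjp-conll".
    # A hypothetical loading call (the hub repository id is an assumption):
    #   datasets.load_dataset("ipipan/nlprepl", "by_name-nkjp-conllu")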
def _info(self):
"""Informative function about dataset features"""
dataset, tagset, extension = self.config.name.split("-")
return datasets.DatasetInfo(
description=_DESCRIPTION[dataset][tagset],
features=datasets.Features(
{
"sent_id": datasets.Value("string"),
"text": datasets.Value("string"),
"orig_file_sentence": datasets.Value("string"),
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"lemmas": datasets.Sequence(datasets.Value("string")),
"upos": datasets.Sequence(datasets.Value("string")),
"xpos": datasets.Sequence(datasets.Value("string")),
"feats": datasets.Sequence(datasets.Value("string")),
"head": datasets.Sequence(datasets.Value("string")),
"deprel": datasets.Sequence(datasets.Value("string")),
"deps": datasets.Sequence(datasets.Value("string")),
"misc": datasets.Sequence(datasets.Value("string")),
}
),
supervised_keys=None,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators for train, dev, and test splits."""
dataset, tagset, extension = self.config.name.split("-")
urls = _URLS[dataset][extension][tagset]
downloaded_files = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath: str):
"""Function to generate example datapoints for the dataset."""
        def generate_misc_column(misc_content: dict):
            """Serializes a token's MISC annotations back into CoNLL-U format.

            E.g. {"SpaceAfter": "No"} -> "SpaceAfter=No"; None -> "".
            """
            if misc_content is None:
                return ""
            return "|".join(k + "=" + v for k, v in misc_content.items())
        example_id = 0  # running example key; renamed so it does not shadow the builtin id()
logger.info("⏳ Generating examples from = %s", filepath)
print("Cached PATHS -- copy into STEP 5:", filepath)
        with open(filepath, "r", encoding="utf-8") as f:
            # Stream sentences with parse_incr() instead of materializing the
            # whole file as a list; output is identical but memory use stays flat.
            for sent in conllu.parse_incr(f):
                idx = sent.metadata.get("sent_id", example_id)
                tokens = [token["form"] for token in sent]
                txt = sent.metadata.get("text", " ".join(tokens))
                yield example_id, {
                    "sent_id": str(idx),
                    "text": txt,
                    # Tolerate files that lack this metadata field.
                    "orig_file_sentence": sent.metadata.get("orig_file_sentence", ""),
                    # Token ids may be ints or, for multiword tokens, tuples
                    # such as (1, "-", 2); stringify them for the Sequence feature.
                    "id": [str(token["id"]) for token in sent],
                    "tokens": tokens,
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [generate_misc_column(token["misc"]) for token in sent],
                }
                example_id += 1
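

if __name__ == "__main__":
    # Minimal local smoke test, a sketch only: it assumes the .gz archives
    # listed in _URLS sit next to this script, as in the dataset repository,
    # and that your `datasets` version accepts trust_remote_code for local
    # scripts (older versions can simply drop the argument).
    ds = datasets.load_dataset(__file__, name="by_name-nkjp-conllu", trust_remote_code=True)
    print(ds)
    print(ds["train"][0])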