# Source: akces_gec/akces_gec.py
# Author: Matej Klemen
# First version of the dataset loading script (commit 75a8519).
import os
from copy import deepcopy
import datasets
_CITATION = """\
@article{naplava2019wnut,
title={Grammatical Error Correction in Low-Resource Scenarios},
author={N{\'a}plava, Jakub and Straka, Milan},
journal={arXiv preprint arXiv:1910.00353},
year={2019}
}
"""
# Short human-readable summary shown on the dataset card.
_DESCRIPTION = """\
AKCES-GEC is a grammar error correction corpus for Czech generated from a subset of AKCES resources.
"""
# Landing page of the dataset record in the LINDAT/CLARIN repository.
_HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3057"
# License under which the corpus is distributed.
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
# Download URL for the zipped corpus (contains train/dev/test M2 files).
_URLS = {
    "akces_gec": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3057/AKCES-GEC.zip"
}
class AkcesGEC(datasets.GeneratorBasedBuilder):
    """AKCES-GEC dataset for grammatical error correction.

    Each example carries the source (erroneous) token sequence, the target
    (corrected) token sequence, and a list of corrections mapping source
    token spans to target token spans with their error-type labels.
    """

    VERSION = datasets.Version("1.0.0")

    # The corpus is doubly annotated; each config exposes one annotator's edits.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ann0", version=VERSION, description="Use annotations from annotator#0"),
        datasets.BuilderConfig(name="ann1", version=VERSION, description="Use annotations from annotator#1")
    ]

    DEFAULT_CONFIG_NAME = "ann0"

    def _info(self):
        """Return the ``DatasetInfo`` describing the example schema."""
        features = datasets.Features(
            {
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                # One entry per applied edit: source span, target span, type labels.
                "corrections": [{
                    "idx_src": datasets.Sequence(datasets.Value("int32")),
                    "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                    "corr_types": datasets.Sequence(datasets.Value("string"))
                }]
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download + extract the archive and wire up train/dev/test M2 files."""
        urls = _URLS["akces_gec"]
        data_dir = dl_manager.download_and_extract(urls)
        # Config name selects which annotator's edits are applied downstream.
        consider_annotator = 0 if self.config.name == "ann0" else 1
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "train", "train.all.m2"), "annotator": consider_annotator},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": os.path.join(data_dir, "dev", "dev.all.m2"), "annotator": consider_annotator},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": os.path.join(data_dir, "test", "test.all.m2"), "annotator": consider_annotator},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file_path, annotator=0):
        """Yield ``(example_id, example_dict)`` pairs parsed from an M2 file.

        The M2 format groups lines per sentence: one ``S <tokens>`` line,
        followed by ``A <start end>|||<types>|||<replacement>|||...|||<annotator>``
        edit lines, terminated by a blank line. Edits are applied in order to
        a copy of the source tokens to reconstruct the target sentence;
        ``offset`` tracks how earlier edits shifted target-side indices.
        """
        # Pseudo-edits that carry no real correction and are skipped entirely.
        skip_edits = {"noop", "UNK", "Um"}

        with open(file_path, "r", encoding="utf-8") as f:
            idx_ex = 0
            src_sent, tgt_sent, corrections, offset = None, None, [], 0
            for idx_line, _line in enumerate(f):
                line = _line.strip()
                if len(line) > 0:
                    prefix, remainder = line[0], line[2:]
                    if prefix == "S":
                        src_sent = remainder.split(" ")
                        # Target starts as a copy and is edited in place below.
                        tgt_sent = deepcopy(src_sent)
                    elif prefix == "A":
                        annotation_data = remainder.split("|||")
                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
                        edit_types, edit_text = annotation_data[1], annotation_data[2]
                        edit_types = edit_types.split(",")
                        if len(set(edit_types) & skip_edits) > 0:
                            continue

                        # Only apply edits belonging to the selected annotator.
                        annotator_id = int(annotation_data[-1])
                        if annotator_id != annotator:
                            continue

                        formatted_correction = {
                            "idx_src": list(range(idx_start, idx_end)),
                            "idx_tgt": [],  # stays empty for deletions
                            "corr_types": edit_types
                        }

                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
                        if removal:
                            # Delete the span token by token; decrementing `offset`
                            # keeps the deletion position fixed as the list shrinks.
                            for idx_to_remove in range(idx_start, idx_end):
                                del tgt_sent[offset + idx_to_remove]
                                offset -= 1
                        else:  # replacement/insertion
                            edit_tokens = edit_text.split(" ")
                            len_diff = len(edit_tokens) - (idx_end - idx_start)

                            formatted_correction["idx_tgt"] = list(
                                range(offset + idx_start, offset + idx_end + len_diff))
                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
                            offset += len_diff

                        corrections.append(formatted_correction)
                else:  # empty line, indicating end of example
                    if src_sent is None and tgt_sent is None:  # multiple empty lines
                        continue

                    yield idx_ex, {
                        "src_tokens": src_sent,
                        "tgt_tokens": tgt_sent,
                        "corrections": corrections
                    }
                    src_sent, tgt_sent, corrections, offset = None, None, [], 0
                    idx_ex += 1

            # FIX: an example was only emitted on a blank separator line, so a
            # file without a trailing blank line silently dropped its last
            # sentence. Flush the pending example here.
            if src_sent is not None:
                yield idx_ex, {
                    "src_tokens": src_sent,
                    "tgt_tokens": tgt_sent,
                    "corrections": corrections
                }