# ik-nlp-22_pestyle.py
import os
import datasets
import pandas as pd
_CITATION = """No citation information available."""
_DESCRIPTION = """\
This dataset contains a sample of sentences taken from the FLORES-101 dataset that were either translated
from scratch or post-edited from an existing automatic translation by three human translators.
Translations were performed for the English-Italian language pair, and translators' behavioral data
(keystrokes, pauses, editing times) were collected using the PET platform.
"""
_HOMEPAGE = "https://www.rug.nl/masters/information-science/?lang=en"
_LICENSE = "Sharing and publishing of the data is not allowed at the moment."
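# Paths to the raw TSV files inside the manually downloaded folder. The "full"
# configuration reads the training split; the masked configurations all read the
# same test split and differ only in which columns they expose.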
_PATHS = {
"full": os.path.join("IK_NLP_22_PESTYLE", "train.tsv"),
"mask_subject": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
"mask_modality": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
"mask_time": os.path.join("IK_NLP_22_PESTYLE", "test.tsv")
}
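# Complete set of columns in the TSV files: item/subject identifiers, translation
# modality, source/MT/target texts, editing time, keystroke counts, pause statistics,
# and edit-distance / quality metrics (BLEU, chrF, TER, aligned edit operations).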
_ALL_FIELDS = [
"item_id", "subject_id", "modality",
"src_text", "mt_text", "tgt_text",
"edit_time", "k_total", "k_letter", "k_digit", "k_white", "k_symbol", "k_nav", "k_erase",
"k_copy", "k_cut", "k_paste", "n_pause_geq_300", "len_pause_geq_300",
"n_pause_geq_1000", "len_pause_geq_1000", "num_annotations",
"n_insert", "n_delete", "n_substitute", "n_shift", "bleu", "chrf", "ter", "aligned_edit"
]
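# Column subsets for the masked test configurations: each one hides the columns that
# would reveal the masked attribute (translator identity, modality, or editing time).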
_FIELDS_MASK_SUBJECT = [f for f in _ALL_FIELDS if f not in ["subject_id"]]
_FIELDS_MASK_MODALITY = [f for f in _ALL_FIELDS if f not in [
"modality", "mt_text", "n_insert", "n_delete", "n_substitute",
"n_shift", "ter", "bleu", "chrf", "aligned_edit"
]]
_FIELDS_MASK_TIME = [f for f in _ALL_FIELDS if f not in [
"edit_time", "n_pause_geq_300", "len_pause_geq_300",
"n_pause_geq_1000", "len_pause_geq_1000"
]]
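# Mapping from configuration name to the columns it exposes.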
_DICT_FIELDS = {
"full": _ALL_FIELDS,
"mask_subject": _FIELDS_MASK_SUBJECT,
"mask_modality": _FIELDS_MASK_MODALITY,
"mask_time": _FIELDS_MASK_TIME
}
class IkNlp22PEStyleConfig(datasets.BuilderConfig):
"""BuilderConfig for the IK NLP '22 Post-editing Stylometry Dataset."""
def __init__(
self,
features,
**kwargs,
):
"""
Args:
features: `list[string]`, list of the features that will appear in the
feature dict. Should not include "label".
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
class IkNlp22PEStyle(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
IkNlp22PEStyleConfig(
name=name,
features=fields,
)
for name, fields in _DICT_FIELDS.items()
]
DEFAULT_CONFIG_NAME = "full"
@property
def manual_download_instructions(self):
        return (
            "Access to the data is restricted to students of the IK MSc NLP 2022 course working on a related project. "
            "To load the data using this dataset script, download and extract the IK_NLP_22_PESTYLE folder you were provided upon selecting the final project. "
            "After extracting it, the folder (referred to as root) must contain an IK_NLP_22_PESTYLE subfolder with the train.tsv and test.tsv files. "
            f"Then, load the dataset with: `datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', '{self.config.name}', data_dir='path/to/root/folder')`"
        )
def _info(self):
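        # All features default to 32-bit integers (keystroke and pause counts); textual
        # and float-valued columns are overridden below.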
features = {feature: datasets.Value("int32") for feature in self.config.features}
for field in ["subject_id", "modality", "src_text", "mt_text", "tgt_text", "aligned_edit"]:
if field in self.config.features:
features[field] = datasets.Value("string")
for field in ["edit_time", "bleu", "chrf", "ter", "n_insert", "n_delete", "n_substitute", "n_shift"]:
if field in self.config.features:
features[field] = datasets.Value("float32")
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(features),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
if not os.path.exists(data_dir):
raise FileNotFoundError(
"{} does not exist. Make sure you insert the unzipped IK_NLP_22_PESTYLE dir via "
"`datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', data_dir=...)`"
"Manual download instructions: {}".format(
data_dir, self.manual_download_instructions
)
)
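        # The "full" configuration is exposed as a train split; the masked configurations
        # are exposed as a test split.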
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN if self.config.name == "full" else datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, _PATHS[self.config.name]),
"features": self.config.features,
},
)
]
def _generate_examples(self, filepath: str, features):
"""Yields examples as (key, example) tuples."""
data = pd.read_csv(filepath, sep="\t")
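        # Keep only the columns exposed by the selected configuration.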
data = data[features]
for id_, row in data.iterrows():
yield id_, row.to_dict()
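# Example usage (a minimal sketch, assuming the IK_NLP_22_PESTYLE folder has been
# obtained through the course and extracted locally; the data_dir value below is a
# placeholder path):
#
#   from datasets import load_dataset
#
#   dataset = load_dataset(
#       "GroNLP/ik-nlp-22_pestyle",
#       "full",  # or "mask_subject", "mask_modality", "mask_time"
#       data_dir="path/to/root/folder",
#   )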