import os

import datasets
import pandas as pd

_CITATION = """No citation information available."""

_DESCRIPTION = """\
This dataset contains a sample of sentences from the FLORES-101 dataset that were either translated
from scratch or post-edited from an existing automatic translation by three human translators.
Translations were performed for the English-Italian language pair, and the translators' behavioral data
(keystrokes, pauses, editing times) were collected using the PET platform.
"""

_HOMEPAGE = "https://www.rug.nl/masters/information-science/?lang=en"

_LICENSE = "Sharing and publishing of the data is not allowed at the moment."

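# Relative paths to the data files; the three masked configurations all read
# the same test split and differ only in which fields they expose.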
_PATHS = {
    "full": os.path.join("IK_NLP_22_PESTYLE", "train.tsv"),
    "mask_subject": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
    "mask_modality": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
    "mask_time": os.path.join("IK_NLP_22_PESTYLE", "test.tsv")
}

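# All fields available in the "full" configuration: identifiers, source/MT/target
# texts, keystroke and pause statistics, and edit-distance/quality metrics.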
_ALL_FIELDS = [
    "item_id", "subject_id", "modality",
    "src_text", "mt_text", "tgt_text",
    "edit_time", "k_total", "k_letter", "k_digit", "k_white", "k_symbol", "k_nav", "k_erase", 
    "k_copy", "k_cut", "k_paste", "n_pause_geq_300", "len_pause_geq_300", 
    "n_pause_geq_1000", "len_pause_geq_1000", "num_annotations",
    "n_insert", "n_delete", "n_substitute", "n_shift", "bleu", "chrf", "ter", "aligned_edit"
]

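# Each masked configuration hides the fields that would reveal the masked
# attribute (translator identity, modality, or timing information).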
_FIELDS_MASK_SUBJECT = [f for f in _ALL_FIELDS if f not in ["subject_id"]]
_FIELDS_MASK_MODALITY = [f for f in _ALL_FIELDS if f not in [
    "modality", "mt_text", "n_insert", "n_delete", "n_substitute", 
    "n_shift", "ter", "bleu", "chrf", "aligned_edit"
]]
_FIELDS_MASK_TIME = [f for f in _ALL_FIELDS if f not in [
    "edit_time", "n_pause_geq_300", "len_pause_geq_300", 
    "n_pause_geq_1000", "len_pause_geq_1000"
]]

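# Maps each configuration name to the list of fields it exposes.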
_DICT_FIELDS = {
    "full": _ALL_FIELDS,
    "mask_subject": _FIELDS_MASK_SUBJECT,
    "mask_modality": _FIELDS_MASK_MODALITY,
    "mask_time": _FIELDS_MASK_TIME
}

class IkNlp22PEStyleConfig(datasets.BuilderConfig):
    """BuilderConfig for the IK NLP '22 Post-editing Stylometry Dataset."""

    def __init__(
        self,
        features,
        **kwargs,
    ):
        """
        Args:
        features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
        **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features


class IkNlp22PEStyle(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        IkNlp22PEStyleConfig(
            name=name,
            features=fields,
        )
        for name, fields in _DICT_FIELDS.items()
    ]

    DEFAULT_CONFIG_NAME = "full"

    @property
    def manual_download_instructions(self):
        return (
            "Access to the data is restricted to students of the IK MSc NLP 2022 course working on a related project. "
            "To load the data using this dataset script, download and extract the IK_NLP_22_PESTYLE folder you were provided upon selecting the final project. "
            "After extracting it, the folder (referred to as root) must contain an IK_NLP_22_PESTYLE subfolder with the train.tsv and test.tsv files. "
            f"Then, load the dataset with: `datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', '{self.config.name}', data_dir='path/to/root/folder')`"
        )

    def _info(self):
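        # Every selected field defaults to int32 (counts, ids); textual and
        # float-valued fields are remapped to string / float32 below.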
        features = {feature: datasets.Value("int32") for feature in self.config.features}
        for field in ["subject_id", "modality", "src_text", "mt_text", "tgt_text", "aligned_edit"]:
            if field in self.config.features:
                features[field] = datasets.Value("string")
        for field in ["edit_time", "bleu", "chrf", "ter", "n_insert", "n_delete", "n_substitute", "n_shift"]:
            if field in self.config.features:
                features[field] = datasets.Value("float32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
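        # data_dir must already contain the manually extracted IK_NLP_22_PESTYLE folder.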
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert the unzipped IK_NLP_22_PESTYLE dir via "
                "`datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', data_dir=...)`. "
                "Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN if self.config.name == "full" else datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _PATHS[self.config.name]),
                    "features": self.config.features,
                },
            )
        ]
    
    def _generate_examples(self, filepath: str, features):
        """Yields examples as (key, example) tuples."""
        data = pd.read_csv(filepath, sep="\t")
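        # Keep only the columns exposed by the selected configuration.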
        data = data[features]
        for id_, row in data.iterrows():
            yield id_, row.to_dict()
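

# Minimal usage sketch (not part of the loading script): it assumes you have the
# manually downloaded IK_NLP_22_PESTYLE folder described in
# manual_download_instructions; 'path/to/root/folder' is a placeholder.
#
#   from datasets import load_dataset
#
#   pestyle = load_dataset(
#       "GroNLP/ik-nlp-22_pestyle",
#       "mask_subject",                  # or "full", "mask_modality", "mask_time"
#       data_dir="path/to/root/folder",
#   )
#   print(pestyle["test"][0])            # the "full" config yields a "train" split instead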