File size: 4,632 Bytes
d6dd84c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26f3452
d6dd84c
 
 
 
 
 
 
 
 
 
 
 
 
 
df05f50
d6dd84c
 
 
 
 
df05f50
 
 
 
 
 
 
 
 
 
d6dd84c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e82061a
d6dd84c
 
 
 
 
 
 
 
 
 
 
 
 
 
df05f50
d6dd84c
 
df05f50
d6dd84c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import os

import datasets
import pandas as pd

# BibTeX entry for citing the dataset (journal/url were still TBD at release).
_CITATION = """@article{sarti-etal-2022-divemt,
    title={{DivEMT}: Neural Machine Translation Post-Editing Effort Across Typologically Diverse Languages},
    author={Sarti, Gabriele and Bisazza, Arianna and Guerberof Arenas, Ana and Toral, Antonio},
    journal={TBD},
    url={TBD},
    year={2022},
    month={may}
}"""

# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
DivEMT is the first publicly available post-editing study of Neural Machine Translation (NMT) over a typologically diverse set of target languages. Using a strictly controlled setup, 18 professional translators were instructed to translate or post-edit the same set of English documents into Arabic, Dutch, Italian, Turkish, Ukrainian, and Vietnamese. During the process, their edits, keystrokes, editing times, pauses, and perceived effort were logged, enabling an in-depth, cross-lingual evaluation of NMT quality and its post-editing process.
"""

# Project homepage and data license.
_HOMEPAGE = "https://github.com/gsarti/divemt"

_LICENSE = "GNU General Public License v3.0"

_ROOT_PATH = "https://raw.githubusercontent.com/gsarti/divemt/main/data/"

_PATHS = {
    "main": os.path.join(_ROOT_PATH, "main.tsv"),
    "warmup": os.path.join(_ROOT_PATH, "warmup.tsv"),
}

# Every column exposed by the loader. NOTE: the order here matters — it is the
# column-selection order used by `_generate_examples` (data[features]), so do
# not reorder these entries.
_ALL_FIELDS = ['unit_id', 'flores_id', 'item_id', 'subject_id', 'task_type',
     'translation_type', 'src_len_chr', 'mt_len_chr', 'tgt_len_chr',
     'src_len_wrd', 'mt_len_wrd', 'tgt_len_wrd', 'edit_time', 'k_total',
     'k_letter', 'k_digit', 'k_white', 'k_symbol', 'k_nav', 'k_erase',
     'k_copy', 'k_cut', 'k_paste', 'k_do', 'n_pause_geq_300',
     'len_pause_geq_300', 'n_pause_geq_1000', 'len_pause_geq_1000',
     'event_time', 'num_annotations', 'last_modification_time', 'n_insert',
     'n_delete', 'n_substitute', 'n_shift', 'tot_shifted_words', 'tot_edits',
     'hter', 'cer', 'bleu', 'chrf', 'lang_id', 'doc_id', 'time_s', 'time_m',
     'time_h', 'time_per_char', 'time_per_word', 'key_per_char',
     'words_per_hour', 'words_per_minute', 'per_subject_visit_order',
     'src_text', 'mt_text', 'tgt_text', 'aligned_edit'
]

# Columns typed as float32 in `_info`; any field in neither of the two lists
# below defaults to int32.
_FLOAT_FIELDS = ["edit_time", "bleu", "chrf", "hter", "n_insert", "n_delete", "n_substitute",
    "n_shift", "time_s", "time_m", "time_h", 'time_per_char', 'time_per_word', 'key_per_char',
    'words_per_hour', 'words_per_minute', 'tot_shifted_words', 'tot_edits', "mt_len_chr",
    "mt_len_wrd", "cer"
]

# Columns typed as string in `_info`.
_STR_FIELDS = ["unit_id", "item_id", "subject_id", "lang_id", "task_type", "translation_type",
    "src_text", "mt_text", "tgt_text", "aligned_edit"
]

class DivEMTConfig(datasets.BuilderConfig):
    """Configuration for one split family (main/warmup) of the DivEMT dataset."""

    def __init__(self, features, **kwargs):
        """
        Args:
            features: `list[string]`, the feature names that will appear in
                the feature dict. Should not include "label".
            **kwargs: keyword arguments forwarded to `datasets.BuilderConfig`.
        """
        # All configurations share the same fixed dataset version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features


class DivEMT(datasets.GeneratorBasedBuilder):
    """Builder for the DivEMT post-editing dataset (configs: "main", "warmup")."""

    VERSION = datasets.Version("1.0.0")

    # Both configurations expose the full column set; only the source TSV differs.
    BUILDER_CONFIGS = [
        DivEMTConfig(name=config_name, features=_ALL_FIELDS)
        for config_name in ["warmup", "main"]
    ]

    DEFAULT_CONFIG_NAME = "main"

    def _info(self):
        """Return the DatasetInfo, assigning a dtype to every configured column."""
        feature_types = {}
        for name in self.config.features:
            if name in _STR_FIELDS:
                dtype = "string"
            elif name in _FLOAT_FIELDS:
                dtype = "float32"
            else:
                # Any column not explicitly string- or float-typed is an integer.
                dtype = "int32"
            feature_types[name] = datasets.Value(dtype)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_types),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the TSV for the active config; all rows go into one train split."""
        local_path = dl_manager.download_and_extract(_PATHS[self.config.name])
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "filepath": local_path,
                "features": self.config.features,
            },
        )
        return [train_split]

    def _generate_examples(self, filepath: str, features):
        """Yields examples as (key, example) tuples, keyed by the row index."""
        # Select only the requested columns, in the order given by `features`.
        frame = pd.read_csv(filepath, sep="\t")[features]
        for row_index, record in frame.iterrows():
            yield row_index, record.to_dict()