Dataset: GroNLP/divemt

Multilinguality: translation
Size Categories: 1K<n<10K
Language Creators: found
Source Datasets: original
License: GNU General Public License v3.0
import ast
import math

import datasets
import pandas as pd

_CITATION = """
@inproceedings{sarti-etal-2022-divemt,
    title = "{D}iv{EMT}: Neural Machine Translation Post-Editing Effort Across Typologically Diverse Languages",
    author = "Sarti, Gabriele and Bisazza, Arianna and Guerberof Arenas, Ana and Toral, Antonio",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.emnlp-main.532",
    pages = "7795--7816",
}
"""

_DESCRIPTION = """\
DivEMT is the first publicly available post-editing study of Neural Machine Translation (NMT) over a typologically diverse set of target languages. Using a strictly controlled setup, 18 professional translators were instructed to translate or post-edit the same set of English documents into Arabic, Dutch, Italian, Turkish, Ukrainian, and Vietnamese. During the process, their edits, keystrokes, editing times, pauses, and perceived effort were logged, enabling an in-depth, cross-lingual evaluation of NMT quality and its post-editing process.
"""

_HOMEPAGE = "https://github.com/gsarti/divemt"

_LICENSE = "GNU General Public License v3.0"

_ROOT_PATH = "https://huggingface.co/datasets/GroNLP/divemt/resolve/main"

# Join URLs with "/" explicitly; os.path.join would produce
# backslash-separated paths on Windows and break the download URLs.
_PATHS = {
    "main": f"{_ROOT_PATH}/main.tsv",
    "warmup": f"{_ROOT_PATH}/warmup.tsv",
}

_ALL_FIELDS = ['unit_id', 'flores_id', 'item_id', 'subject_id', 'lang_id', 'doc_id', 
     'task_type', 'translation_type', 'src_len_chr', 'mt_len_chr', 'tgt_len_chr',
     'src_len_wrd', 'mt_len_wrd', 'tgt_len_wrd', 'edit_time', 'k_total',
     'k_letter', 'k_digit', 'k_white', 'k_symbol', 'k_nav', 'k_erase',
     'k_copy', 'k_cut', 'k_paste', 'k_do', 'n_pause_geq_300',
     'len_pause_geq_300', 'n_pause_geq_1000', 'len_pause_geq_1000',
     'event_time', 'num_annotations', 'last_modification_time', 'n_insert',
     'n_delete', 'n_substitute', 'n_shift', 'tot_shifted_words', 'tot_edits',
     'hter', 'cer', 'bleu', 'chrf', 'time_s', 'time_m',
     'time_h', 'time_per_char', 'time_per_word', 'key_per_char',
     'words_per_hour', 'words_per_minute', 'per_subject_visit_order',
     'src_text', 'mt_text', 'tgt_text', 'aligned_edit', 'src_tokens', 'src_annotations',
     'mt_tokens', 'mt_annotations', 'tgt_tokens', 'tgt_annotations', 'src_wmt22_qe', 'mt_wmt22_qe'
]

_FLOAT_FIELDS = ["edit_time", "bleu", "chrf", "hter", "n_insert", "n_delete", "n_substitute",
    "n_shift", "time_s", "time_m", "time_h", 'time_per_char', 'time_per_word', 'key_per_char',
    'words_per_hour', 'words_per_minute', 'tot_shifted_words', 'tot_edits', "mt_len_chr",
    "mt_len_wrd", "cer"
]

_STR_FIELDS = ["unit_id", "item_id", "subject_id", "lang_id", "task_type", "translation_type",
    "src_text", "mt_text", "tgt_text", "aligned_edit"
]

_STR_SEQ_FIELDS = ['src_tokens', 'mt_tokens', 'tgt_tokens', 'src_wmt22_qe', 'mt_wmt22_qe']

_LANG_ANNOTATIONS_FIELDS = ['src_annotations', 'mt_annotations', 'tgt_annotations']


class DivEMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the DivEMT Dataset."""

    def __init__(
        self,
        features,
        **kwargs,
    ):
        """
        Args:
        features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
        **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features


class DivEMT(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        DivEMTConfig(
            name=name,
            features=_ALL_FIELDS,
        )
        for name in ["warmup", "main"]
    ]

    DEFAULT_CONFIG_NAME = "main"

    def _info(self):
        # Default every field to int32 (counts such as keystrokes and pauses),
        # then override the string, float, and sequence fields below.
        features = {feature: datasets.Value("int32") for feature in self.config.features}
        for field in _ALL_FIELDS:
            if field in self.config.features:
                if field in _STR_FIELDS:
                    features[field] = datasets.Value("string")
                if field in _FLOAT_FIELDS:
                    features[field] = datasets.Value("float32")
                if field in _STR_SEQ_FIELDS:
                    features[field] = datasets.Sequence(datasets.Value("string"))
                if field in _LANG_ANNOTATIONS_FIELDS:
                    features[field] = datasets.features.Sequence(
                        {
                            "lemma": datasets.Value("string"),
                            "upos": datasets.Value("string"),
                            "feats": datasets.Value("string"),
                            "head": datasets.Value("string"),
                            "deprel": datasets.Value("string"),
                            "start_char": datasets.Value("int32"),
                            "end_char": datasets.Value("int32"),
                            "ner": datasets.Value("string"),
                        }
                    )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_PATHS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_dir,
                    "features": self.config.features,
                },
            )
        ]
    
    def _generate_examples(self, filepath: str, features):
        """Yields examples as (key, example) tuples."""
        data = pd.read_csv(filepath, sep="\t")
        data = data[features]
        for id_, row in data.iterrows():
            row_dic = row.to_dict()
            for field in _STR_SEQ_FIELDS + _LANG_ANNOTATIONS_FIELDS:
                # pandas parses empty cells as NaN floats: map those to empty
                # lists, and parse the remaining stringified Python literals
                # (token lists or annotation dicts) back into objects.
                if isinstance(row_dic[field], float) and math.isnan(row_dic[field]):
                    row_dic[field] = []
                else:
                    row_dic[field] = ast.literal_eval(row_dic[field])
            yield id_, row_dic
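
A minimal usage sketch (not part of the original script): assuming the loader is consumed through the Hugging Face `datasets` library under the Hub ID GroNLP/divemt implied by _ROOT_PATH above, loading the default "main" configuration looks roughly like this.

import datasets

# Recent versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets like this one.
divemt = datasets.load_dataset("GroNLP/divemt", "main", split="train")
print(divemt)                  # column names and number of rows
print(divemt[0]["src_text"])   # English source segment
print(divemt[0]["tgt_text"])   # translated or post-edited target segment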