gsarti commited on
Commit
cbd07da
1 Parent(s): e17750f

Added loading script

Browse files
Files changed (1) hide show
  1. wmt_vat.py +152 -0
wmt_vat.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Variance-Aware Machine Translation Test Sets"""
16
+
17
+ import os
18
+ import json
19
+ import textwrap
20
+ from typing import List
21
+
22
+ import datasets
23
+ from datasets.utils.download_manager import DownloadManager
24
+
25
+
26
_CITATION = """\
@inproceedings{
zhan2021varianceaware,
title={Variance-Aware Machine Translation Test Sets},
author={Runzhe Zhan and Xuebo Liu and Derek F. Wong and Lidia S. Chao},
booktitle={Thirty-fifth Conference on Neural Information Processing Systems, Datasets and Benchmarks Track},
year={2021},
url={https://openreview.net/forum?id=hhKA5k0oVy5}
}
"""

_DESCRIPTION = """\
The Variance-Aware Machine Translation corpus contains 70 small and discriminative test sets for machine translation (MT)
evaluation called variance-aware test sets (VAT), covering 35 translation directions from WMT16 to WMT20 competitions.
VAT is automatically created by a novel variance-aware filtering method that filters the indiscriminative test instances
of the current MT benchmark without any human labor. Experimental results show that VAT outperforms the original WMT benchmark
in terms of the correlation with human judgment across mainstream language pairs and test sets. Further analysis on the properties
of VAT reveals the challenging linguistic features (e.g., translation of low-frequency words and proper nouns) for the competitive
MT systems, providing guidance for constructing future MT test sets.
"""

_HOMEPAGE = "https://github.com/NLP2CT/Variance-Aware-MT-Test-Sets"

_LICENSE = "https://raw.githubusercontent.com/NLP2CT/Variance-Aware-MT-Test-Sets/main/LICENSE"

# Raw-content base URLs. The original constants pointed at the GitHub HTML
# tree ("https://github.com/NLP2CT/Variance-Aware-MT-Test-Sets/VAT_data"),
# which serves web pages rather than the files themselves, so downloads
# would fail; raw.githubusercontent.com serves the actual file contents
# (same host pattern _LICENSE already uses).
_BASE_URL = "https://raw.githubusercontent.com/NLP2CT/Variance-Aware-MT-Test-Sets/main/VAT_data"
_META_URL = "https://raw.githubusercontent.com/NLP2CT/Variance-Aware-MT-Test-Sets/main/VAT_meta"

# Language pairs available per WMT campaign, encoded as "src_tgt".
_CONFIGS = {
    "wmt16": ["tr_en", "ru_en", "ro_en", "de_en", "en_ru", "fi_en", "cs_en"],
    "wmt17": ["en_lv", "zh_en", "en_tr", "lv_en", "en_de", "ru_en", "en_fi", "tr_en", "en_zh", "en_ru", "fi_en", "en_cs", "de_en", "cs_en"],
    "wmt18": ["en_cs", "cs_en", "en_fi", "en_tr", "en_et", "ru_en", "et_en", "tr_en", "fi_en", "zh_en", "en_zh", "en_ru", "de_en", "en_de"],
    "wmt19": ["zh_en", "en_cs", "de_en", "en_gu", "fr_de", "en_zh", "fi_en", "en_fi", "kk_en", "de_cs", "lt_en", "en_lt", "ru_en", "en_kk", "en_ru", "gu_en", "de_fr", "en_de"],
    "wmt20": ["km_en", "cs_en", "en_de", "ja_en", "ps_en", "en_zh", "en_ta", "de_en", "zh_en", "en_ja", "en_cs", "en_pl", "en_ru", "pl_en", "iu_en", "ru_en", "ta_en"],
}

# Per-config download URLs for the source and reference files, e.g. for
# "wmt16_tr_en": <base>/wmt16/vat_newstest2016-tr-en-src.tr (and -ref.en).
# URL segments are joined with a literal "/" rather than os.path.join,
# which would insert backslashes on Windows and break the URLs.
_PATHS = {
    f"{year}_{pair}": {
        "src": f"{_BASE_URL}/{year}/vat_newstest20{year[3:]}-{pair.replace('_', '-')}-src.{pair.split('_')[0]}",
        "ref": f"{_BASE_URL}/{year}/vat_newstest20{year[3:]}-{pair.replace('_', '-')}-ref.{pair.split('_')[1]}",
    }
    for year, pairs in _CONFIGS.items()
    for pair in pairs
}

# One variance-filtering metadata file per campaign (keyed "wmtXX").
_METADATA_PATHS = {year: f"{_META_URL}/{year}/bert-r_filter-std60.json" for year in _CONFIGS}
70
+
71
+
72
class WmtVatConfig(datasets.BuilderConfig):
    """BuilderConfig for one Variance-Aware MT test set (campaign + direction)."""

    def __init__(self, campaign: str, source: str, reference: str, **kwargs):
        """Create a config for a single WMT campaign / translation direction.

        Args:
            campaign: `str`, WMT campaign from which the test set was extracted
            source: `str`, source for translation.
            reference: `str`, reference translation.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        # Independent attribute assignments; order is irrelevant.
        self.reference = reference
        self.source = source
        self.campaign = campaign
92
+
93
+
94
class WmtVat(datasets.GeneratorBasedBuilder):
    """Variance-Aware Machine Translation Test Sets"""

    VERSION = datasets.Version("1.0.0")

    # One config per "<campaign>_<src>_<tgt>" key in _PATHS,
    # e.g. "wmt16_tr_en" -> campaign="wmt16", source="tr", reference="en".
    BUILDER_CONFIGS = [
        WmtVatConfig(
            name=cfg,
            campaign=cfg.split("_")[0],
            source=cfg.split("_")[1],
            reference=cfg.split("_")[2],
        )
        for cfg in _PATHS
    ]

    def _info(self):
        """Dataset metadata: each example carries the instance's original
        test-set index plus the source sentence and reference translation."""
        features = datasets.Features(
            {
                "orig_id": datasets.Value("int32"),
                "source": datasets.Value("string"),
                "reference": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators (a single TEST split per config)."""
        urls = _PATHS[self.config.name]
        src_file = dl_manager.download_and_extract(urls["src"])
        ref_file = dl_manager.download_and_extract(urls["ref"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "src_path": src_file,
                    "ref_path": ref_file,
                    # Config names are "wmtXX_src_tgt"; slice past the 5-char
                    # campaign AND the separating underscore to get "src-tgt".
                    # (The original sliced at [5:], which left a leading "_"
                    # and produced keys like "-tr-en".)
                    "pair": self.config.name[6:].replace("_", "-"),
                    "meta_path": _METADATA_PATHS[self.config.name[:5]],  # Only wmtXX
                },
            )
        ]

    def _generate_examples(
        self, src_path: str, ref_path: str, pair: str, meta_path: str
    ):
        """Yields examples as (key, example) tuples.

        The metadata JSON maps each language pair (e.g. "tr-en") to the list
        of original test-set indices kept by the variance-aware filter; the
        i-th source/reference line corresponds to the i-th kept index.
        """
        with open(meta_path, encoding="utf-8") as meta:
            ids = json.load(meta)[pair]
        # BUG FIX: the original opened `src_path` twice, so every example's
        # "reference" field was a duplicate of its source sentence.
        with open(src_path, encoding="utf-8") as src, open(ref_path, encoding="utf-8") as ref:
            for id_, (src_ex, ref_ex, orig_idx) in enumerate(zip(src, ref, ids)):
                yield id_, {
                    "orig_id": orig_idx,
                    # Line iteration keeps the trailing newline; strip it so
                    # examples hold clean sentences.
                    "source": src_ex.rstrip("\n"),
                    "reference": ref_ex.rstrip("\n"),
                }