holylovenia committed on
Commit
9e74c78
1 Parent(s): 3450cbe

Upload muse.py with huggingface_hub

Files changed (1)
  1. muse.py +197 -0
muse.py ADDED
@@ -0,0 +1,197 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Contains 110 large-scale ground-truth bilingual dictionaries created and released by Meta using an internal translation tool.
+ The dictionaries account for polysemy. The data comprises a train and a test split of 5000 and 1500 unique source words respectively, as well as a larger set of up to 100k pairs.
+ It covers European languages in every direction, and SEA languages to and from English.
+ """
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{lample2018word,
+   title={Word translation without parallel data},
+   author={Lample, Guillaume and Conneau, Alexis and Ranzato, Marc'Aurelio and Denoyer, Ludovic and J{\'e}gou, Herv{\'e}},
+   booktitle={International Conference on Learning Representations},
+   year={2018}
+ }
+ """
+
+ _DATASETNAME = "muse"
+
+ _DESCRIPTION = """\
+ Contains 110 large-scale ground-truth bilingual dictionaries created and released by Meta using an internal translation tool.
+ The dictionaries account for polysemy. The data comprises a train and a test split of 5000 and 1500 unique source words respectively, as well as a larger set of up to 100k pairs.
+ It covers European languages in every direction, and SEA languages to and from English.
+ """
+
+ _HOMEPAGE = "https://github.com/facebookresearch/MUSE#ground-truth-bilingual-dictionaries"
+
+ _LANGUAGES = ["tgl", "ind", "zlm", "tha", "vie"]
+
+ _LICENSE = Licenses.CC_BY_NC_ND_4_0.value
+
+ _LOCAL = False
+
+ _TRAIN_URL_TEMPLATE = "https://dl.fbaipublicfiles.com/arrival/dictionaries/{src}-{tgt}.0-5000.txt"
+ _TEST_URL_TEMPLATE = "https://dl.fbaipublicfiles.com/arrival/dictionaries/{src}-{tgt}.5000-6500.txt"
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+ configs = {
+     "tgl": ["eng"],
+     "ind": ["eng"],
+     "zlm": ["eng"],
+     "tha": ["eng"],
+     "vie": ["eng"],
+     "eng": ["tha", "vie", "tgl", "zlm", "ind"],
+ }
+
+ langid_dict = {
+     "eng": "en",
+     "tgl": "tl",
+     "ind": "id",
+     "zlm": "ms",
+     "tha": "th",
+     "vie": "vi",
+ }
+
+
+ class MUSEDataset(datasets.GeneratorBasedBuilder):
+     """Large-scale ground-truth bilingual dictionaries"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = (
+         [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_source",
+                 version=SOURCE_VERSION,
+                 description=f"{_DATASETNAME} source schema",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_tgl_eng",
+             ),
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_seacrowd_t2t",
+                 version=SEACROWD_VERSION,
+                 description=f"{_DATASETNAME} SEACrowd schema",
+                 schema="seacrowd_t2t",
+                 subset_id=f"{_DATASETNAME}_tgl_eng",
+             ),
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{src_lang}_{tgt_lang}_source",
+                 version=datasets.Version(_SOURCE_VERSION),
+                 description=f"{_DATASETNAME} source schema",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_{src_lang}_{tgt_lang}",
+             )
+             for src_lang in configs
+             for tgt_lang in configs[src_lang]
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{src_lang}_{tgt_lang}_seacrowd_t2t",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} SEACrowd schema",
+                 schema="seacrowd_t2t",
+                 subset_id=f"{_DATASETNAME}_{src_lang}_{tgt_lang}",
+             )
+             for src_lang in configs
+             for tgt_lang in configs[src_lang]
+         ]
+     )
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "src_text": datasets.Value("string"),
+                     "tgt_text": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+
+         _, src_lang, tgt_lang = self.config.subset_id.split("_")
+         train_url = _TRAIN_URL_TEMPLATE.format(src=langid_dict[src_lang], tgt=langid_dict[tgt_lang])
+         test_url = _TEST_URL_TEMPLATE.format(src=langid_dict[src_lang], tgt=langid_dict[tgt_lang])
+
+         train_file = dl_manager.download_and_extract(train_url)
+         test_file = dl_manager.download_and_extract(test_url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "src_lang": src_lang,
+                     "tgt_lang": tgt_lang,
+                     "filepath": train_file,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "src_lang": src_lang,
+                     "tgt_lang": tgt_lang,
+                     "filepath": test_file,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, src_lang: str, tgt_lang: str, filepath: Path) -> Tuple[int, Dict]:
+         if self.config.schema == "source":
+             for row_id, line in enumerate(open(filepath)):
+                 src_text, tgt_text = line.strip().split("\t")
+                 yield row_id, {"id": row_id, "src_text": src_text, "tgt_text": tgt_text}
+
+         elif self.config.schema == "seacrowd_t2t":
+             for row_id, line in enumerate(open(filepath)):
+                 src_text, tgt_text = line.strip().split("\t")
+                 yield row_id, {
+                     "id": row_id,
+                     "text_1": src_text,
+                     "text_2": tgt_text,
+                     "text_1_name": src_lang,
+                     "text_2_name": tgt_lang,
+                 }
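
For reference, a minimal usage sketch is given below. It assumes the script is invoked through the standard datasets.load_dataset API; the local path "muse.py" and the config name "muse_vie_eng_seacrowd_t2t" (Vietnamese-English in the SEACrowd t2t schema, following the naming pattern in BUILDER_CONFIGS) are illustrative choices, not part of this commit.

import datasets

# Hypothetical example: any "{src}_{tgt}" pair listed in `configs` above can be
# substituted into the config name. trust_remote_code=True may be required on
# newer `datasets` releases when running a script-based loader.
muse = datasets.load_dataset(
    "muse.py",
    name="muse_vie_eng_seacrowd_t2t",
    trust_remote_code=True,
)

# The loader exposes a "train" split (0-5000 dictionary) and a "validation"
# split (5000-6500 dictionary); each row pairs a source word with its translation.
print(muse["train"][0])  # e.g. {"id": "0", "text_1": ..., "text_2": ..., ...}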