Davlan committed on
Commit
ca075d7
1 Parent(s): 3f799e9

Upload mafand.py

Browse files
Files changed (1) hide show
  1. mafand.py +224 -0
mafand.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """MAFAND-MT: Masakhane Anglo and Franco Africa News Dataset for Machine Translation"""
18
+
19
+ import datasets
20
+ import json
21
+
22
+ logger = datasets.logging.get_logger(__name__)
23
+
24
+ _CITATION = """\
25
+ @inproceedings{adelani-etal-2022-thousand,
26
+ title = "A Few Thousand Translations Go a Long Way! Leveraging Pre-trained Models for {A}frican News Translation",
27
+ author = "Adelani, David and
28
+ Alabi, Jesujoba and
29
+ Fan, Angela and
30
+ Kreutzer, Julia and
31
+ Shen, Xiaoyu and
32
+ Reid, Machel and
33
+ Ruiter, Dana and
34
+ Klakow, Dietrich and
35
+ Nabende, Peter and
36
+ Chang, Ernie and
37
+ Gwadabe, Tajuddeen and
38
+ Sackey, Freshia and
39
+ Dossou, Bonaventure F. P. and
40
+ Emezue, Chris and
41
+ Leong, Colin and
42
+ Beukman, Michael and
43
+ Muhammad, Shamsuddeen and
44
+ Jarso, Guyo and
45
+ Yousuf, Oreen and
46
+ Niyongabo Rubungo, Andre and
47
+ Hacheme, Gilles and
48
+ Wairagala, Eric Peter and
49
+ Nasir, Muhammad Umair and
50
+ Ajibade, Benjamin and
51
+ Ajayi, Tunde and
52
+ Gitau, Yvonne and
53
+ Abbott, Jade and
54
+ Ahmed, Mohamed and
55
+ Ochieng, Millicent and
56
+ Aremu, Anuoluwapo and
57
+ Ogayo, Perez and
58
+ Mukiibi, Jonathan and
59
+ Ouoba Kabore, Fatoumata and
60
+ Kalipe, Godson and
61
+ Mbaye, Derguene and
62
+ Tapo, Allahsera Auguste and
63
+ Memdjokam Koagne, Victoire and
64
+ Munkoh-Buabeng, Edwin and
65
+ Wagner, Valencia and
66
+ Abdulmumin, Idris and
67
+ Awokoya, Ayodele and
68
+ Buzaaba, Happy and
69
+ Sibanda, Blessing and
70
+ Bukula, Andiswa and
71
+ Manthalu, Sam",
72
+ booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
73
+ month = jul,
74
+ year = "2022",
75
+ address = "Seattle, United States",
76
+ publisher = "Association for Computational Linguistics",
77
+ url = "https://aclanthology.org/2022.naacl-main.223",
78
+ doi = "10.18653/v1/2022.naacl-main.223",
79
+ pages = "3053--3070",
80
+ abstract = "Recent advances in the pre-training for language models leverage large-scale datasets to create multilingual models. However, low-resource languages are mostly left out in these datasets. This is primarily because many widely spoken languages that are not well represented on the web and therefore excluded from the large-scale crawls for datasets. Furthermore, downstream users of these models are restricted to the selection of languages originally chosen for pre-training. This work investigates how to optimally leverage existing pre-trained models to create low-resource translation systems for 16 African languages. We focus on two questions: 1) How can pre-trained models be used for languages not included in the initial pretraining? and 2) How can the resulting translation models effectively transfer to new domains? To answer these questions, we create a novel African news corpus covering 16 languages, of which eight languages are not part of any existing evaluation dataset. We demonstrate that the most effective strategy for transferring both additional languages and additional domains is to leverage small quantities of high-quality translation data to fine-tune large pre-trained models.",
81
+ }
82
+ """
83
+
84
+ _DESCRIPTION = """\
85
+ MAFAND-MT is the largest MT benchmark for African languages in the news domain, covering 21 languages. The languages covered are:
86
+ - Amharic
87
+ - Bambara
88
+ - Ghomala
89
+ - Ewe
90
+ - Fon
91
+ - Hausa
92
+ - Igbo
93
+ - Kinyarwanda
94
+ - Luganda
95
+ - Luo
96
+ - Mossi
97
+ - Nigerian-Pidgin
98
+ - Chichewa
99
+ - Shona
100
+ - Swahili
101
+ - Setswana
102
+ - Twi
103
+ - Wolof
104
+ - Xhosa
105
+ - Yoruba
106
+ - Zulu
107
+
108
+ The train/validation/test sets are available for 16 languages, and validation/test set for amh, kin, nya, sna, and xho
109
+
110
+ For more details see https://aclanthology.org/2022.naacl-main.223/
111
+ """
112
+
113
+ _URL = "https://raw.githubusercontent.com/masakhane-io/lafand-mt/main/data/json_files/"
114
+ _TRAINING_FILE = "train.json"
115
+ _DEV_FILE = "dev.json"
116
+ _TEST_FILE = "test.json"
117
+
118
+
119
+ class MafandConfig(datasets.BuilderConfig):
120
+ """BuilderConfig for Mafand"""
121
+
122
+ def __init__(self, **kwargs):
123
+ """BuilderConfig for Masakhaner.
124
+ Args:
125
+ **kwargs: keyword arguments forwarded to super.
126
+ """
127
+ super(MafandConfig, self).__init__(**kwargs)
128
+
129
+
130
+ class Mafand(datasets.GeneratorBasedBuilder):
131
+ """Mafand dataset."""
132
+ BUILDER_CONFIGS = [
133
+ MafandConfig(name="en-amh", version=datasets.Version("1.0.0"),
134
+ description="Mafand English-Amharic dataset"),
135
+ MafandConfig(name="en-hau", version=datasets.Version("1.0.0"),
136
+ description="Mafand English-Hausa dataset"),
137
+ MafandConfig(name="en-ibo", version=datasets.Version("1.0.0"),
138
+ description="Mafand English-Igbo dataset"),
139
+ MafandConfig(name="en-kin", version=datasets.Version("1.0.0"),
140
+ description="Mafand English-Kinyarwanda dataset"),
141
+ MafandConfig(name="en-lug", version=datasets.Version("1.0.0"),
142
+ description="Mafand English-Luganda dataset"),
143
+ MafandConfig(name="en-nya", version=datasets.Version("1.0.0"),
144
+ description="Mafand English-Chichewa dataset"),
145
+ MafandConfig(name="en-pcm", version=datasets.Version("1.0.0"),
146
+ description="Mafand English-Naija dataset"),
147
+ MafandConfig(name="en-sna", version=datasets.Version("1.0.0"),
148
+ description="Mafand English-Shona dataset"),
149
+ MafandConfig(name="en-swa", version=datasets.Version("1.0.0"),
150
+ description="Mafand English-Swahili dataset"),
151
+ MafandConfig(name="en-tsn", version=datasets.Version("1.0.0"),
152
+ description="Mafand English-Setswana dataset"),
153
+ MafandConfig(name="en-twi", version=datasets.Version("1.0.0"),
154
+ description="Mafand English-Twi dataset"),
155
+ MafandConfig(name="en-xho", version=datasets.Version("1.0.0"),
156
+ description="Mafand English-Xhosa dataset"),
157
+ MafandConfig(name="en-yor", version=datasets.Version("1.0.0"),
158
+ description="Mafand English-Yoruba dataset"),
159
+ MafandConfig(name="en-zul", version=datasets.Version("1.0.0"),
160
+ description="Mafand English-Zulu dataset"),
161
+ MafandConfig(name="fr-bam", version=datasets.Version("1.0.0"),
162
+ description="Mafand French-Bambara dataset"),
163
+ MafandConfig(name="fr-bbj", version=datasets.Version("1.0.0"),
164
+ description="Mafand French-Ghomala dataset"),
165
+ MafandConfig(name="fr-ewe", version=datasets.Version("1.0.0"),
166
+ description="Mafand French-Ewe dataset"),
167
+ MafandConfig(name="fr-fon", version=datasets.Version("1.0.0"),
168
+ description="Mafand French-Fon dataset"),
169
+ MafandConfig(name="fr-mos", version=datasets.Version("1.0.0"),
170
+ description="Mafand French-Mossi dataset"),
171
+ MafandConfig(name="fr-wol", version=datasets.Version("1.0.0"),
172
+ description="Mafand French-Wolof dataset"),
173
+ ]
174
+
175
+ def _info(self):
176
+ source, target = self.config.name.split('-')
177
+ return datasets.DatasetInfo(
178
+ description=_DESCRIPTION,
179
+ features=datasets.Features({"translation": datasets.features.Translation(languages=(source, target))}),
180
+ supervised_keys=(source, target),
181
+ homepage="https://github.com/masakhane-io/lafand-mt",
182
+ citation=_CITATION,
183
+ )
184
+
185
+ def _split_generators(self, dl_manager):
186
+ """Returns SplitGenerators."""
187
+ source, target = self.config.name.split('-')
188
+ if target in ['amh', 'kin', 'nya', 'sna', 'xho']:
189
+ urls_to_download = {
190
+ "dev": f"{_URL}{self.config.name}/{_DEV_FILE}",
191
+ "test": f"{_URL}{self.config.name}/{_TEST_FILE}",
192
+ }
193
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
194
+ return [
195
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION,
196
+ gen_kwargs={"filepath": downloaded_files["dev"]}),
197
+ datasets.SplitGenerator(name=datasets.Split.TEST,
198
+ gen_kwargs={"filepath": downloaded_files["test"]}),
199
+ ]
200
+ else:
201
+ urls_to_download = {
202
+ "train": f"{_URL}{self.config.name}/{_TRAINING_FILE}",
203
+ "dev": f"{_URL}{self.config.name}/{_DEV_FILE}",
204
+ "test": f"{_URL}{self.config.name}/{_TEST_FILE}",
205
+ }
206
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
207
+
208
+ return [
209
+ datasets.SplitGenerator(name=datasets.Split.TRAIN,
210
+ gen_kwargs={"filepath": downloaded_files["train"]}),
211
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION,
212
+ gen_kwargs={"filepath": downloaded_files["dev"]}),
213
+ datasets.SplitGenerator(name=datasets.Split.TEST,
214
+ gen_kwargs={"filepath": downloaded_files["test"]}),
215
+ ]
216
+
217
+ def _generate_examples(self, filepath):
218
+ logger.info("⏳ Generating examples from = %s", filepath)
219
+ with open(filepath, encoding="utf-8") as f:
220
+ idx = 0
221
+ for line in f:
222
+ src_tgt = json.loads(line)
223
+ yield idx, src_tgt
224
+ idx += 1