akshitab committed
Commit 9d23551
1 Parent(s): c40f6f3

add test script
Files changed (1)
  1. nllb.py +144 -0
nllb.py ADDED
@@ -0,0 +1,144 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""No Language Left Behind (NLLB)"""

import json
import urllib.request

import datasets

_CITATION = ""  # TODO

_DESCRIPTION = ""

_HOMEPAGE = ""

_LICENSE = ""

# open() cannot read a URL directly, so fetch the language-pair manifest over HTTP
with urllib.request.urlopen(
    "https://huggingface.co/datasets/allenai/nllb/resolve/main/all_lang_pairs.json"
) as f:
    _LANGUAGE_PAIRS = json.load(f)
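
# The manifest is assumed to be a JSON list of [source, target] language-code
# pairs, e.g. [["eng_Latn", "fra_Latn"], ...] (codes here are illustrative;
# the real list comes from all_lang_pairs.json)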

# limit to the first two pairs while testing the script
_LANGUAGE_PAIRS = _LANGUAGE_PAIRS[:2]

_URL_BASE = "https://storage.googleapis.com/allennlp-data-bucket/nllb/"

_URLs = {
    f"{src_lg}-{trg_lg}": f"{_URL_BASE}{src_lg}-{trg_lg}.gz"
    for src_lg, trg_lg in _LANGUAGE_PAIRS
}
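# e.g. a manifest pair ["eng_Latn", "fra_Latn"] (illustrative codes) would map
# the key "eng_Latn-fra_Latn" to f"{_URL_BASE}eng_Latn-fra_Latn.gz"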


class NLLBTaskConfig(datasets.BuilderConfig):
    """BuilderConfig for No Language Left Behind Dataset."""

    def __init__(self, src_lg, tgt_lg, **kwargs):
        super(NLLBTaskConfig, self).__init__(**kwargs)
        self.src_lg = src_lg
        self.tgt_lg = tgt_lg


class NLLB(datasets.GeneratorBasedBuilder):
    """No Language Left Behind Dataset."""

    BUILDER_CONFIGS = [
        NLLBTaskConfig(
            name=f"{src_lg}-{tgt_lg}",
            version=datasets.Version("1.0.0"),
            description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
            src_lg=src_lg,
            tgt_lg=tgt_lg,
        )
        for (src_lg, tgt_lg) in _LANGUAGE_PAIRS
    ]
    BUILDER_CONFIG_CLASS = NLLBTaskConfig

    def _info(self):
        # define feature types
        features = datasets.Features(
            {
                "translation": datasets.Translation(
                    languages=(self.config.src_lg, self.config.tgt_lg)
                ),
                "laser_score": datasets.Value("float32"),
                "source_sentence_lid": datasets.Value("float32"),
                "target_sentence_lid": datasets.Value("float32"),
                "source_sentence_source": datasets.Value("string"),
                "source_sentence_url": datasets.Value("string"),
                "target_sentence_source": datasets.Value("string"),
                "target_sentence_url": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        pair = f"{self.config.src_lg}-{self.config.tgt_lg}"  # string identifier for the language pair
        url = _URLs[pair]  # URL of the pair-specific file
        # download the archive and store the path of the extracted data file
        data_file = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "source_lg": self.config.src_lg,
                    "target_lg": self.config.tgt_lg,
                },
            )
        ]

    def _generate_examples(self, filepath, source_lg, target_lg):
        with open(filepath, encoding="utf-8") as f:
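            # each line is assumed to carry 9 tab-separated fields:
            # source text, target text, LASER score, source/target LID scores,
            # then source/target provenance names and URLs (order inferred
            # from the indexing below)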
            for id_, example in enumerate(f):
                try:
                    # strip the trailing newline so the last field stays clean
                    datarow = example.rstrip("\n").split("\t")
                    row = {}
                    row["translation"] = {
                        source_lg: datarow[0],
                        target_lg: datarow[1],
                    }  # create translation json
                    row["laser_score"] = float(datarow[2])
                    row["source_sentence_lid"] = float(datarow[3])
                    row["target_sentence_lid"] = float(datarow[4])
                    row["source_sentence_source"] = datarow[5]
                    row["source_sentence_url"] = datarow[6]
                    row["target_sentence_source"] = datarow[7]
                    row["target_sentence_url"] = datarow[8]
                    row = {
                        k: None if not v else v for k, v in row.items()
                    }  # replace empty values with None
                except Exception:
                    # print the offending row before re-raising
                    print(datarow)
                    raise
                yield id_, row


# to test the script, go to the root folder of the repo (nllb) and run:
#     datasets-cli test nllb --save_infos --all_configs
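
# Once the test passes, a config can be loaded like any other Hub dataset.
# A minimal sketch, assuming the script is served from the allenai/nllb repo
# and that "eng_Latn-fra_Latn" is one of the generated config names (the real
# names come from all_lang_pairs.json):
#
#     import datasets
#
#     ds = datasets.load_dataset("allenai/nllb", "eng_Latn-fra_Latn", split="train")
#     print(ds[0]["translation"])  # {"eng_Latn": "...", "fra_Latn": "..."}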