Guillaume Wenzek committed
Commit 7248137
1 Parent(s): 69397c2

fix ccmatrix loading

Files changed (1): nllb.py (+46 -54)
nllb.py CHANGED
@@ -26,7 +26,7 @@ import typing as tp
 _CITATION = (
     "@article{team2022NoLL,"
     "title={No Language Left Behind: Scaling Human-Centered Machine Translation},"
-    "author={Nllb team and Marta Ruiz Costa-juss{\`a} and James Cross and Onur Celebi and Maha Elbayad and Kenneth Heafield and Kevin Heffernan and Elahe Kalbassi and Janice Lam and Daniel Licht and Jean Maillard and Anna Sun and Skyler Wang and Guillaume Wenzek and Alison Youngblood and Bapi Akula and Lo{\"i}c Barrault and Gabriel Mejia Gonzalez and Prangthip Hansanti and John Hoffman and Semarley Jarrett and Kaushik Ram Sadagopan and Dirk Rowe and Shannon L. Spruit and C. Tran and Pierre Andrews and Necip Fazil Ayan and Shruti Bhosale and Sergey Edunov and Angela Fan and Cynthia Gao and Vedanuj Goswami and Francisco Guzm'an and Philipp Koehn and Alexandre Mourachko and Christophe Ropers and Safiyyah Saleem and Holger Schwenk and Jeff Wang},"
+    r"author={Nllb team and Marta Ruiz Costa-juss{\`a} and James Cross and Onur Celebi and Maha Elbayad and Kenneth Heafield and Kevin Heffernan and Elahe Kalbassi and Janice Lam and Daniel Licht and Jean Maillard and Anna Sun and Skyler Wang and Guillaume Wenzek and Alison Youngblood and Bapi Akula and Lo{\"i}c Barrault and Gabriel Mejia Gonzalez and Prangthip Hansanti and John Hoffman and Semarley Jarrett and Kaushik Ram Sadagopan and Dirk Rowe and Shannon L. Spruit and C. Tran and Pierre Andrews and Necip Fazil Ayan and Shruti Bhosale and Sergey Edunov and Angela Fan and Cynthia Gao and Vedanuj Goswami and Francisco Guzm'an and Philipp Koehn and Alexandre Mourachko and Christophe Ropers and Safiyyah Saleem and Holger Schwenk and Jeff Wang},"
     "journal={ArXiv},"
     "year={2022},"
     "volume={abs/2207.04672}"
@@ -41,61 +41,55 @@ _HOMEPAGE = "" # TODO
 _LICENSE = "https://opendatacommons.org/licenses/by/1-0/"
 
 from .nllb_lang_pairs import LANG_PAIRS as _LANGUAGE_PAIRS
-from .ccmatrix_lang_pairs import LANG_PAIRS as _CCMATRIX_LANGUAGE_PAIRS
-
-_URL_BASE = "https://storage.googleapis.com/allennlp-data-bucket/nllb/"
-
-_URLs = {
-    f"{src_lg}-{trg_lg}": f"{_URL_BASE}{src_lg}-{trg_lg}.gz"
-    for src_lg, trg_lg in _LANGUAGE_PAIRS
-}
+from .ccmatrix_lang_pairs import PAIRS as CCMATRIX_PAIRS
+from .ccmatrix_lang_pairs import MAPPING as CCMATRIX_MAPPING
 
+_ALLENAI_URL = "https://storage.googleapis.com/allennlp-data-bucket/nllb/"
 _STATMT_URL = "http://data.statmt.org/cc-matrix/"
-_URLs.update(
-    {
-        f"{src_lg}-{trg_lg}": f"{_STATMT_URL}{src_lg}-{trg_lg}.bitextf.tsv.gz"
-        for src_lg, trg_lg in _CCMATRIX_LANGUAGE_PAIRS
-    }
-)
 
 
 class NLLBTaskConfig(datasets.BuilderConfig):
     """BuilderConfig for No Language Left Behind Dataset."""
 
-    def __init__(self, src_lg, tgt_lg, source, **kwargs):
+    def __init__(self, src_lg, tgt_lg, url, **kwargs):
         super(NLLBTaskConfig, self).__init__(**kwargs)
         self.src_lg = src_lg
         self.tgt_lg = tgt_lg
-        url = _URLs.get(f"{src_lg}-{tgt_lg}", "")
-        self.source = source
-
+        self.url = url
+        self.source = "statmt" if url.startswith(_STATMT_URL) else "allenai"
 
 def _builder_configs() -> tp.List[NLLBTaskConfig]:
-    configs = []
-    for (src_lg, tgt_lg) in _LANGUAGE_PAIRS:
-        configs.append(
-            NLLBTaskConfig(
-                name=f"{src_lg}-{tgt_lg}",
-                version=datasets.Version("1.0.0"),
-                description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
-                src_lg=src_lg,
-                tgt_lg=tgt_lg,
-                source="allenai",
-            )
+    """
+    Note we always return data from AllenAI if possible because CC-Matrix data
+    is older, and most language pairs have been improved between the two versions.
+    """
+    configs = {}
+
+    for (src, tgt) in CCMATRIX_PAIRS:
+        src_lg = CCMATRIX_MAPPING[src]
+        tgt_lg = CCMATRIX_MAPPING[tgt]
+        if not src_lg or not tgt_lg:
+            continue
+        configs[(src_lg, tgt_lg)] = NLLBTaskConfig(
+            name=f"{src_lg}-{tgt_lg}",
+            version=datasets.Version("1.0.0"),
+            description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
+            src_lg=src_lg,
+            tgt_lg=tgt_lg,
+            # Use the CC-Matrix language code to fetch from statmt
+            url=f"{_STATMT_URL}{src}-{tgt}.bitextf.tsv.gz",
         )
 
-    for (src_lg, tgt_lg) in _CCMATRIX_LANGUAGE_PAIRS:
-        configs.append(
-            NLLBTaskConfig(
-                name=f"{src_lg}-{tgt_lg}",
-                version=datasets.Version("1.0.0"),
-                description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
-                src_lg=src_lg,
-                tgt_lg=tgt_lg,
-                source="mtstats",
-            )
+    for (src_lg, tgt_lg) in _LANGUAGE_PAIRS:
+        configs[(src_lg, tgt_lg)] = NLLBTaskConfig(
+            name=f"{src_lg}-{tgt_lg}",
+            version=datasets.Version("1.0.0"),
+            description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
+            src_lg=src_lg,
+            tgt_lg=tgt_lg,
+            url=f"{_ALLENAI_URL}{src_lg}-{tgt_lg}.gz",
         )
-    return configs
+    return list(configs.values())
 
 
 class NLLB(datasets.GeneratorBasedBuilder):
@@ -120,7 +114,7 @@ class NLLB(datasets.GeneratorBasedBuilder):
                 "target_sentence_url": datasets.Value("string"),
             }
         )
-        if self.config.source == "mtstats":
+        if self.config.source == "statmt":
            # MT stats didn't publish all the metadata
            features = datasets.Features(
                {
@@ -141,18 +135,16 @@ class NLLB(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        pair = f"{self.config.src_lg}-{self.config.tgt_lg}"  # string identifier for language pair
-        url = _URLs[pair]  # url for download of pair-specific file
-        data_file = dl_manager.download_and_extract(
-            url
-        )  # extract downloaded data and store path in data_file
+        """Returns one training generator. NLLB200 is meant for training.
+
+        If you're interested in evaluation look at https://huggingface.co/datasets/facebook/flores
+        """
+        local_file = dl_manager.download_and_extract(self.config.url)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": data_file,
+                    "filepath": local_file,
                     "source_lg": self.config.src_lg,
                     "target_lg": self.config.tgt_lg,
                 },
@@ -160,7 +152,7 @@ class NLLB(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepath, source_lg, target_lg):
-        if self.config.source == "mtstats":
+        if self.config.source == "statmt":
            # MT stats didn't publish all the metadata
            return self._generate_minimal_examples(filepath, source_lg, target_lg)
 
@@ -173,10 +165,11 @@ class NLLB(datasets.GeneratorBasedBuilder):
             try:
                 datarow = example.split("\t")
                 row = {}
+                # create translation json
                 row["translation"] = {
                     source_lg: datarow[0],
                     target_lg: datarow[1],
-                }  # create translation json
+                }
                 row["laser_score"] = float(datarow[2])
                 row["source_sentence_lid"] = float(datarow[3])
                 row["target_sentence_lid"] = float(datarow[4])
@@ -184,9 +177,8 @@ class NLLB(datasets.GeneratorBasedBuilder):
                 row["source_sentence_url"] = datarow[6]
                 row["target_sentence_source"] = datarow[7]
                 row["target_sentence_url"] = datarow[8]
-                row = {
-                    k: None if not v else v for k, v in row.items()
-                }  # replace empty values
+                # replace empty values
+                row = {k: None if not v else v for k, v in row.items()}
             except:
                 print(datarow)
                 raise
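
A note on the new _builder_configs: it registers the statmt (CC-Matrix) pairs first and then lets the AllenAI pairs overwrite any entry stored under the same (src_lg, tgt_lg) key. That is what enforces the docstring's "we always return data from AllenAI if possible": Python dicts keep insertion order, and reassigning a key replaces its value in place. A toy sketch of that pattern (the pair lists and URL strings below are made up for illustration, not taken from the repo):

    # Overwrite-by-key pattern used in _builder_configs; values are toy stand-ins.
    ccmatrix_pairs = [("en", "fr"), ("en", "de")]
    allenai_pairs = [("en", "fr")]  # suppose a newer AllenAI copy exists only for en-fr

    configs = {}
    for src, tgt in ccmatrix_pairs:
        configs[(src, tgt)] = f"statmt:{src}-{tgt}"
    for src, tgt in allenai_pairs:
        configs[(src, tgt)] = f"allenai:{src}-{tgt}"  # replaces the statmt entry

    print(list(configs.values()))
    # ['allenai:en-fr', 'statmt:en-de'] -> AllenAI wins whenever both sources have the pair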
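With each config carrying its own url, _split_generators no longer consults the shared _URLs table whose statmt entries broke CC-Matrix loading. A minimal usage sketch (assuming the script is published on the Hub as allenai/nllb and that eng_Latn-fra_Latn is an available AllenAI-hosted pair; both names are assumptions, not taken from this diff):

    # Usage sketch; the repo id and config name are assumptions.
    from datasets import load_dataset

    ds = load_dataset("allenai/nllb", "eng_Latn-fra_Latn", split="train", streaming=True)
    row = next(iter(ds))
    print(row["translation"])  # {"eng_Latn": "...", "fra_Latn": "..."}
    print(row["laser_score"])  # mining score, present for AllenAI-hosted pairs

Pairs served from statmt expose only the translation field, since, as the code comments note, statmt did not publish the per-sentence metadata.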