holylovenia committed on
Commit 38208c9
1 Parent(s): 80a24ac

Upload covost2.py with huggingface_hub

Files changed (1)
  1. covost2.py +239 -0
covost2.py ADDED
@@ -0,0 +1,239 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import csv
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import DEFAULT_NUSANTARA_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks
+
+ _LANGUAGES = ["ind", "eng"]
+ _CITATION = """\
+ @article{wang2020covost,
+     title={CoVoST 2 and massively multilingual speech-to-text translation},
+     author={Wang, Changhan and Wu, Anne and Pino, Juan},
+     journal={arXiv preprint arXiv:2007.10310},
+     year={2020}
+ }
+
+ @inproceedings{wang21s_interspeech,
+     author={Wang, Changhan and Wu, Anne and Pino, Juan},
+     title={{CoVoST 2 and Massively Multilingual Speech Translation}},
+     year={2021},
+     booktitle={Proc. Interspeech 2021},
+     pages={2247--2251},
+     url={https://www.isca-speech.org/archive/interspeech_2021/wang21s_interspeech},
+     doi={10.21437/Interspeech.2021-2027}
+ }
+ """
+
+ _DATASETNAME = "covost2"
+ _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
+ _UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+
+ _DESCRIPTION = """\
+ CoVoST 2 is a large-scale multilingual speech translation corpus covering translations from 21 languages into English
+ and from English into 15 languages. The dataset was created from Mozilla's open-source Common Voice database of
+ crowdsourced voice recordings and contains about 2,900 hours of speech.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/covost2"
+
+ _LOCAL = False
+ _LICENSE = "CC BY-NC 4.0"
+
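+ # The audio itself is distributed by Common Voice (release 6.1); CoVoST 2 only
+ # distributes translation TSVs keyed by Common Voice clip names. LANG_CODE maps
+ # this script's ISO 639-3 codes to the ISO 639-1 codes used by both upstream sources.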
+ COMMONVOICE_URL_TEMPLATE = "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/cv-corpus-6.1-2020-12-11/{lang}.tar.gz"
+ LANG_CODE = {"eng": "en", "ind": "id"}
+ LANG_COMBINATION_CODE = [("ind", "eng"), ("eng", "ind")]
+ _URLS = {_DATASETNAME: {"ind": COMMONVOICE_URL_TEMPLATE.format(lang=LANG_CODE["ind"]), "eng": COMMONVOICE_URL_TEMPLATE.format(lang=LANG_CODE["eng"])}}
+
+ _SUPPORTED_TASKS = [Tasks.SPEECH_TO_TEXT_TRANSLATION, Tasks.MACHINE_TRANSLATION]
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
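+ # nusantara_config_constructor builds one NusantaraConfig per (direction, schema)
+ # pair; with two directions and three schemas, BUILDER_CONFIGS below ends up with
+ # six configs in total.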
+ def nusantara_config_constructor(src_lang, tgt_lang, schema, version):
+     if src_lang == "" or tgt_lang == "":
+         raise ValueError(f"Invalid src_lang {src_lang} or tgt_lang {tgt_lang}")
+
+     if schema not in ["source", "nusantara_sptext", "nusantara_t2t"]:
+         raise ValueError(f"Invalid schema: {schema}")
+
+     return NusantaraConfig(
+         name="covost2_{src}_{tgt}_{schema}".format(src=src_lang, tgt=tgt_lang, schema=schema),
+         version=datasets.Version(version),
+         description="covost2 {schema} schema from {src} to {tgt}".format(schema=schema, src=src_lang, tgt=tgt_lang),
+         schema=schema,
+         subset_id="co_vo_st2_{src}_{tgt}".format(src=src_lang, tgt=tgt_lang),
+     )
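+ # For example, nusantara_config_constructor("eng", "ind", "source", "1.0.0") yields
+ # the config named "covost2_eng_ind_source", which is also DEFAULT_CONFIG_NAME below.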
+
+
+ class Covost2(datasets.GeneratorBasedBuilder):
+     """CoVoST 2 is primarily a speech-to-text translation dataset, built from Mozilla's Common Voice
+     recordings. The source schema provides the source-language audio and transcription together with
+     the translated transcription; the nusantara schema provides only the source-language audio and
+     the target-language transcription. The available directions are eng->ind and ind->eng. The text
+     portion of the dataset can also be used on its own for machine translation between eng and ind.
+
+     Side note: the download is roughly 40 GB for the English source data and about 1 GB for the
+     Indonesian source data.
+     """
+
+     COVOST_URL_TEMPLATE = "https://dl.fbaipublicfiles.com/covost/covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = (
+         [nusantara_config_constructor(src, tgt, "source", _SOURCE_VERSION) for (src, tgt) in LANG_COMBINATION_CODE]
+         + [nusantara_config_constructor(src, tgt, "nusantara_sptext", _NUSANTARA_VERSION) for (src, tgt) in LANG_COMBINATION_CODE]
+         + [nusantara_config_constructor(src, tgt, "nusantara_t2t", _NUSANTARA_VERSION) for (src, tgt) in LANG_COMBINATION_CODE]
+     )
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_eng_ind_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "client_id": datasets.Value("string"),
+                     "file": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "sentence": datasets.Value("string"),
+                     "translation": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_sptext":
+             features = schemas.speech_text_features
+         elif self.config.schema == "nusantara_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             # The transcription lives in "sentence" for the source schema and in "text" for nusantara_sptext.
+             task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence" if self.config.schema == "source" else "text")]
+             if self.config.schema in ("source", "nusantara_sptext")
+             else None,
+         )
+
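+     # Download flow (as implemented below): fetch the Common Voice 6.1 archive for the
+     # source language, then fetch the much smaller CoVoST 2 TSV that adds translations
+     # on top of it; the two are joined later in _generate_examples via the clip "path".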
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         name_split = self.config.name.split("_")
+         src_lang, tgt_lang = name_split[1], name_split[2]
+
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls[src_lang])
+
+         src_lang = LANG_CODE[src_lang]
+         tgt_lang = LANG_CODE[tgt_lang]
+
+         data_dir = os.path.join(data_dir, "cv-corpus-6.1-2020-12-11", src_lang)
+
+         covost_tsv_url = self.COVOST_URL_TEMPLATE.format(src_lang=src_lang, tgt_lang=tgt_lang)
+         extracted_dir = dl_manager.download_and_extract(covost_tsv_url)
+
+         covost_tsv_filename = "covost_v2.{src_lang}_{tgt_lang}.tsv"
+         covost_tsv_file = os.path.join(extracted_dir, covost_tsv_filename.format(src_lang=src_lang, tgt_lang=tgt_lang))
+         cv_tsv_file = os.path.join(data_dir, "validated.tsv")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "covost_tsv_path": covost_tsv_file,
+                     "cv_tsv_path": cv_tsv_file,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "covost_tsv_path": covost_tsv_file,
+                     "cv_tsv_path": cv_tsv_file,
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "covost_tsv_path": covost_tsv_file,
+                     "cv_tsv_path": cv_tsv_file,
+                     # CoVoST 2 names its validation split "dev" in the TSV.
+                     "split": "dev",
+                 },
+             ),
+         ]
+
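+     # _generate_examples joins Common Voice's validated.tsv (audio path, transcript,
+     # speaker) with the CoVoST 2 TSV (translation, split assignment) on the clip path,
+     # then yields rows in whichever schema the active config requests.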
+     def _generate_examples(self, filepath: Path, covost_tsv_path: Path, cv_tsv_path: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         name_split = self.config.name.split("_")
+         src_lang, tgt_lang = name_split[1], name_split[2]
+
+         covost_tsv = self._load_df_from_tsv(covost_tsv_path)
+         cv_tsv = self._load_df_from_tsv(cv_tsv_path)
+
+         df = pd.merge(
+             left=cv_tsv[["path", "sentence", "client_id"]],
+             right=covost_tsv[["path", "translation", "split"]],
+             how="inner",
+             on="path",
+         )
+         if split == "train":
+             df = df[(df["split"] == "train") | (df["split"] == "train_covost")]
+         else:
+             df = df[df["split"] == split]
+
+         for idx, row in df.iterrows():
+             if self.config.schema == "source":
+                 yield idx, {
+                     "id": row["path"].replace(".mp3", ""),
+                     "client_id": row["client_id"],
+                     "sentence": row["sentence"],
+                     "translation": row["translation"],
+                     "file": os.path.join(filepath, "clips", row["path"]),
+                     "audio": os.path.join(filepath, "clips", row["path"]),
+                 }
+             elif self.config.schema == "nusantara_sptext":
+                 yield idx, {
+                     "id": row["path"].replace(".mp3", ""),
+                     "speaker_id": row["client_id"],
+                     "text": row["translation"],
+                     "path": os.path.join(filepath, "clips", row["path"]),
+                     "audio": os.path.join(filepath, "clips", row["path"]),
+                     "metadata": {
+                         "speaker_age": None,
+                         "speaker_gender": None,
+                     },
+                 }
+             elif self.config.schema == "nusantara_t2t":
+                 yield idx, {
+                     "id": row["path"].replace(".mp3", ""),
+                     "text_1": row["sentence"],
+                     "text_2": row["translation"],
+                     "text_1_name": src_lang,
+                     "text_2_name": tgt_lang,
+                 }
+             else:
+                 raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
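+     # QUOTE_NONE together with an escape character lets raw quotation marks in the
+     # transcripts pass through unmodified, and na_filter=False keeps empty cells as
+     # empty strings instead of NaN.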
+     @staticmethod
+     def _load_df_from_tsv(path):
+         return pd.read_csv(
+             path,
+             sep="\t",
+             header=0,
+             encoding="utf-8",
+             escapechar="\\",
+             quoting=csv.QUOTE_NONE,
+             na_filter=False,
+         )