Khalsuu committed on
Commit 6128e88
1 Parent(s): 002377d

Upload filipino_dataset_script.py

Files changed (1)
  1. filipino_dataset_script.py +234 -0
filipino_dataset_script.py ADDED
@@ -0,0 +1,234 @@
+
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Magic Hub Dataset"""
+
+
+ import os
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _DATA_URL = "./fil_tsv.tar.gz"  # https://huggingface.co/datasets/Khalsuu/filipino_dataset_script/resolve/main/fil.zip
+
+ # _CITATION = """\
+ # @inproceedings{commonvoice:2020,
+ #   author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
+ #   title = {Common Voice: A Massively-Multilingual Speech Corpus},
+ #   booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
+ #   pages = {4211--4215},
+ #   year = 2020
+ # }
+ # """
+
+ _DESCRIPTION = """\
+ Magic Hub's initiative to help teach machines how real people speak. It provides structured speech data so that
+ enthusiasts and researchers can spend more time training models rather than cleaning and structuring data.
+ """
+
+ _HOMEPAGE = "https://magichub.com/datasets/filipino-scripted-speech-corpus-daily-use-sentence/"
+
+ _LICENSE = "https://magichub.com/magic-data-open-source-license/"
+
+ _LANGUAGES = {
+     "fil": {
+         "Language": "Filipino",
+         "Date": "2022-4-20",
+         "Size": "414 MB",
+         "Version": "ab_1h_2020-12-11",
+         "Validated_Hr_Total": 4.58,
+         "Overall_Hr_Total": 4.58,
+         "Number_Of_Voice": 10,
+     },
+ }
+
+
+ class FilipinoVoiceConfig(datasets.BuilderConfig):
+     """BuilderConfig for Filipino Speech."""
+
+     def __init__(self, name, sub_version, **kwargs):
+         """
+         Args:
+             name: `string`, name of the configuration (the language id).
+             sub_version: `string`, version of the underlying corpus snapshot.
+             **kwargs: keyword arguments forwarded to super; `language`, `date`, `size`,
+                 `val_hrs`, `total_hrs` and `num_of_voice` are popped and stored
+                 as attributes on the config.
+         """
+         self.sub_version = sub_version
+         self.language = kwargs.pop("language", None)
+         self.date_of_snapshot = kwargs.pop("date", None)
+         self.size = kwargs.pop("size", None)
+         self.validated_hr_total = kwargs.pop("val_hrs", None)
+         self.total_hr_total = kwargs.pop("total_hrs", None)
+         self.num_of_voice = kwargs.pop("num_of_voice", None)
+         description = f"Magic Hub speech-to-text dataset in {self.language}, version {self.sub_version} of {self.date_of_snapshot}. The dataset comprises {self.validated_hr_total} hours of validated transcribed speech data from {self.num_of_voice} speakers. The dataset has a size of {self.size}."
+         super(FilipinoVoiceConfig, self).__init__(
+             name=name, version=datasets.Version("1.0.0", ""), description=description, **kwargs
+         )
+
+
+ class FilipinoVoice(datasets.GeneratorBasedBuilder):
+
+     DEFAULT_WRITER_BATCH_SIZE = 1000
+     BUILDER_CONFIGS = [
+         FilipinoVoiceConfig(
+             name=lang_id,
+             language=_LANGUAGES[lang_id]["Language"],
+             sub_version=_LANGUAGES[lang_id]["Version"],
+             date=_LANGUAGES[lang_id]["Date"],
+             size=_LANGUAGES[lang_id]["Size"],
+             val_hrs=_LANGUAGES[lang_id]["Validated_Hr_Total"],
+             total_hrs=_LANGUAGES[lang_id]["Overall_Hr_Total"],
+             num_of_voice=_LANGUAGES[lang_id]["Number_Of_Voice"],
+         )
+         for lang_id in _LANGUAGES.keys()
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "CHANNEL": datasets.Value("string"),
+                 "UTTRANS_ID": datasets.Value("string"),
+                 "SPEAKER_ID": datasets.Value("string"),
+                 "PROMPT": datasets.Value("string"),
+                 "TRANSCRIPTION": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate=16_000),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             # citation=_CITATION,
+             task_templates=[AutomaticSpeechRecognition(task="automatic-speech-recognition", transcription_column="TRANSCRIPTION")],
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # Download the TAR archive that contains the audio files:
+         archive_path = dl_manager.download(_DATA_URL.format(self.config.name))
+
+         # First we locate the data using the path within the archive:
+         # path_to_data = "/".join(["cv-corpus-6.1-2020-12-11", self.config.name])
+         path_to_data = "fil"
+         path_to_clips = "/".join([path_to_data, "clips"])
+         metadata_filepaths = {
+             split: "/".join([path_to_data, f"{split}.txt"])
+             for split in ["train", "test"]
+         }
+
+         # print("Archive path:" + archive_path)
+         # print("Path to clips:" + path_to_clips)
+         # print(metadata_filepaths)
+         # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
+         local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
+
+         # print("Local extracted archive:")
+         # print(local_extracted_archive)
+
+         # To access the audio data from the TAR archive using the download manager,
+         # we have to use the dl_manager.iter_archive method.
+         #
+         # This is because dl_manager.download_and_extract
+         # does not support TAR archives in streaming mode
+         # (we have to stream the files of a TAR archive one by one).
+         #
+         # The iter_archive method returns an iterable of (path_within_archive, file_obj) for every
+         # file in the TAR archive.
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "local_extracted_archive": local_extracted_archive,
+                     "archive_iterator": dl_manager.iter_archive(
+                         archive_path
+                     ),  # use iter_archive here to access the files in the TAR archive
+                     "metadata_filepath": metadata_filepaths["train"],
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "local_extracted_archive": local_extracted_archive,
+                     "archive_iterator": dl_manager.iter_archive(
+                         archive_path
+                     ),  # use iter_archive here to access the files in the TAR archive
+                     "metadata_filepath": metadata_filepaths["test"],
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, local_extracted_archive, archive_iterator, metadata_filepath, path_to_clips):
+         """Yields examples."""
+         data_fields = list(self._info().features.keys())
+
+         # print(data_fields)
+         # "audio" is not a column of the metadata file
+         data_fields.remove("audio")
+         path_idx = data_fields.index("UTTRANS_ID")
+         # print(path_idx)
+
+         # print("Requested metadata: " + metadata_filepath)
+         # print(archive_iterator)
+         all_field_values = {}
+         metadata_found = False
+         # Here we iterate over all the files within the TAR archive:
+         for path, f in archive_iterator:
+             # print(path, ", ", f)
+             # Parse the tab-separated metadata file
+             if path == metadata_filepath:
+                 metadata_found = True
+                 lines = f.readlines()
+                 headline = lines[0].decode("utf-8")
+                 column_names = headline.strip().split("\t")
+                 assert (
+                     column_names == data_fields
+                 ), f"The file should have {data_fields} as column names, but has {column_names}"
+                 for line in lines[1:]:
+                     field_values = line.decode("utf-8").strip().split("\t")
+                     # set the full path of the audio file within the archive
+                     audio_path = "/".join([path_to_clips, field_values[path_idx]])
+                     all_field_values[audio_path] = field_values
+             # Else, read the audio file and yield an example
+             elif path.startswith(path_to_clips):
+                 assert metadata_found, "Found audio clips before the metadata file."
+                 if not all_field_values:
+                     break
+                 if path in all_field_values:
+                     # retrieve the metadata corresponding to this audio file
+                     field_values = all_field_values[path]
+
+                     # print("Found field values")
+                     # print(field_values)
+                     # if data is incomplete, fill with empty values
+                     if len(field_values) < len(data_fields):
+                         field_values += (len(data_fields) - len(field_values)) * ["''"]
+                     result = {key: value for key, value in zip(data_fields, field_values)}
+
+                     # set audio feature
+                     result["audio"] = {"path": path, "bytes": f.read()}
+
+                     # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
+                     result["UTTRANS_ID"] = os.path.join(local_extracted_archive, path) if local_extracted_archive else None
+
+                     yield path, result
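
For reference, a minimal usage sketch of how a dataset built from this script might be loaded. It assumes the script is hosted in the Khalsuu/filipino_dataset_script repository (the repo id is taken from the URL commented next to _DATA_URL) and uses the "fil" config defined above; recent versions of datasets may additionally require trust_remote_code=True for community loading scripts.

from datasets import load_dataset

# Stream the train split; iter_archive in _split_generators lets examples be read
# from the TAR archive one by one, without extracting it locally first.
fil_train = load_dataset("Khalsuu/filipino_dataset_script", "fil", split="train", streaming=True)

# Each example carries the metadata columns plus the decoded 16 kHz "audio" feature declared in _info().
example = next(iter(fil_train))
print(example["TRANSCRIPTION"])
print(example["audio"]["sampling_rate"])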