holylovenia commited on
Commit
37c56be
1 Parent(s): 70d50b2

Upload voxlingua.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. voxlingua.py +204 -0
voxlingua.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This new update refers to the this HF dataloader script
3
+ https://huggingface.co/datasets/csebuetnlp/xlsum/blob/main/xlsum.py
4
+ while conforming to SEACrowd schema
5
+ """
6
+
7
+ import os
8
+ from pathlib import Path
9
+ from typing import Dict, List, Tuple
10
+
11
+ import datasets
12
+
13
+ from seacrowd.utils import schemas
14
+ from seacrowd.utils.configs import SEACrowdConfig
15
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
16
+
17
+ _CITATION = """\
18
+ @inproceedings{valk2021slt,
19
+ title={{VoxLingua107}: a Dataset for Spoken Language Recognition},
20
+ author={J{\"o}rgen Valk and Tanel Alum{\"a}e},
21
+ booktitle={Proc. IEEE SLT Workshop},
22
+ year={2021},
23
+ }
24
+ """
25
+
26
+ _LOCAL = False
27
+ _LANGUAGES = ["ceb", "ind", "jav", "khm", "lao", "zlm", "mya", "sun", "tha", "tgl", "vie", "war"] # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
28
+
29
+ _LANG_TO_DATASOURCE_LANG = {
30
+ "ceb": "ceb",
31
+ "ind": "id",
32
+ "jav": "jw",
33
+ "khm": "km",
34
+ "lao": "lo",
35
+ "zlm": "ms",
36
+ "mya": "my",
37
+ "sun": "su",
38
+ "tha": "th",
39
+ "tgl": "tl",
40
+ "vie": "vi",
41
+ "war": "war"}
42
+
43
+ _DATASETNAME = "voxlingua"
44
+
45
+ _DESCRIPTION = """\
46
+ VoxLingua107 is a comprehensive speech dataset designed for training spoken language identification models.
47
+ It comprises short speech segments sourced from YouTube videos, labeled based on the language indicated in the video
48
+ title and description. The dataset covers 107 languages and contains a total of 6628 hours of speech data,
49
+ averaging 62 hours per language. However, the actual amount of data per language varies significantly.
50
+ Additionally, there is a separate development set consisting of 1609 speech segments from 33 languages,
51
+ validated by at least two volunteers to ensure the accuracy of language representation.
52
+ """
53
+
54
+ _HOMEPAGE = "https://bark.phon.ioc.ee/voxlingua107/"
55
+
56
+ _LICENSE = Licenses.CC_BY_4_0.value
57
+
58
+ _URLS = "https://bark.phon.ioc.ee/voxlingua107/{identifier}.zip"
59
+
60
+ _SUPPORTED_TASKS = [Tasks.SPEECH_LANGUAGE_IDENTIFICATION]
61
+
62
+ _SOURCE_VERSION = "1.0.0"
63
+
64
+ _SEACROWD_VERSION = "2024.06.20"
65
+
66
+
67
def construct_configs() -> List[SEACrowdConfig]:
    """
    Build the list of `SEACrowdConfig` objects for this dataloader.

    Returns:
        One "source"-schema config followed by one SEACrowd-schema config
        per task in `_SUPPORTED_TASKS`.
    """
    # lowercase schema suffix for each supported task (e.g. "speech")
    schema_suffixes = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]

    # the single source-schema config comes first
    configs = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id="source",
        )
    ]

    # then one SEACrowd-schema config per (task, suffix) pair
    for task, suffix in zip(_SUPPORTED_TASKS, schema_suffixes):
        schema_name = f"seacrowd_{suffix}"
        configs.append(
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{schema_name}",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME} seacrowd schema for {task.name}",
                schema=schema_name,
                subset_id=schema_name,
            )
        )
    return configs
107
+
108
+
109
class VoxLinguaDataset(datasets.GeneratorBasedBuilder):
    """Speech Lang ID on dataset VoxLingua (Southeast Asian languages subset)."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = construct_configs()

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema."""
        if self.config.schema == "source":
            # since the source only contains audio folder structure,
            # we will define it using simplified ver of SEACrowd speech_features schema
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "labels": datasets.ClassLabel(names=_LANGUAGES),
                }
            )
        elif self.config.schema == "seacrowd_speech":
            features = schemas.speech_features(label_names=_LANGUAGES)
        else:
            raise ValueError(f"Unexpected self.config.schema of {self.config.schema}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download one archive per language and expose a single TRAIN split.

        Since this is Speech LID, all languages must be downloaded in a single
        list; the extraction dirs come back in the same order as `_LANGUAGES`.
        """
        # for train data, the identifier is a lang_code defined in `_LANG_TO_DATASOURCE_LANG`
        train_url_list = [_URLS.format(identifier=_LANG_TO_DATASOURCE_LANG[lang_val]) for lang_val in _LANGUAGES]
        train_data_dir = dl_manager.download_and_extract(train_url_list)

        # for val data, the `dev.zip` doesn't contain any data indicated in
        # _LANGUAGES, so no validation split is generated

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_data_dir,
                },
            )
        ]

    def _generate_examples(self, filepath: List[Path]) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from the extracted per-language folders.

        Args:
            filepath: extraction directories, one per language, in the same
                order as `_LANGUAGES` (guaranteed by `_split_generators`).

        Raises:
            ValueError: if an archive's folder layout is not the expected
                single language-named directory of flat files, or the config
                schema is unknown.
        """
        # defined as -1 so that in the first loop it will have value of 0
        example_idx = -1

        for lang_idx, child_path in enumerate(filepath):
            iso_label = _LANGUAGES[lang_idx]
            source_label = _LANG_TO_DATASOURCE_LANG[iso_label]

            # check 1: every extracted archive contains exactly one subdir,
            # named with the datasource lang code. Raise (not assert) so the
            # validation survives `python -O`.
            first_level_rel_dir = os.listdir(child_path)
            if first_level_rel_dir != [source_label]:
                raise ValueError(f"The structure of path is unexpected! Expected {[source_label]} got: {first_level_rel_dir}")

            # check 2: the language folder contains only files, no directories.
            # BUGFIX: the original used `not all(os.path.isdir(name))` on bare
            # entry names (resolved against CWD, not the folder), so it could
            # never detect an actual nested directory.
            first_level_dir = os.path.join(child_path, first_level_rel_dir[0])
            second_level_entries = os.listdir(first_level_dir)
            if any(os.path.isdir(os.path.join(first_level_dir, entry)) for entry in second_level_entries):
                raise ValueError(f"Found directory within folder {first_level_dir}!")

            # extract sound data with format ".wav"
            wav_files = [os.path.join(first_level_dir, entry) for entry in second_level_entries if entry.endswith(".wav")]

            # BUGFIX: the label must index into _LANGUAGES (ISO 639-3 codes).
            # The original `_LANGUAGES.index(expected_lang_label)` looked up the
            # datasource code (e.g. "id"), raising ValueError for every language
            # whose two codes differ (all except "ceb" and "war").
            label = _LANGUAGES.index(iso_label)

            if self.config.schema == "source":
                for _fp in wav_files:
                    example_idx += 1
                    # "id" is declared as Value("string"), so cast explicitly
                    ex = {"id": str(example_idx), "path": _fp, "audio": _fp, "labels": label}
                    yield example_idx, ex

            elif self.config.schema == "seacrowd_speech":
                for _fp in wav_files:
                    example_idx += 1
                    ex = {
                        # cast to str to match the string id feature
                        # (seacrowd speech schema also uses string ids — TODO confirm)
                        "id": str(example_idx),
                        "path": _fp,
                        "audio": _fp,
                        "speaker_id": "",
                        "labels": label,
                        "metadata": {
                            # speaker info unavailable in VoxLingua107, filled with default val
                            "speaker_age": -1,
                            "speaker_gender": "",
                        },
                    }
                    yield example_idx, ex

            else:
                raise ValueError(f"Invalid config schema of {self.config.schema}!")