mj-new committed
Commit 349c94d
1 Parent(s): 76bd302

Adding build script

Files changed (3)
  1. .python-version +1 -0
  2. pl-asr-bigos-v2.py +243 -0
  3. test.py +54 -0
.python-version ADDED
@@ -0,0 +1 @@
+ bigos-hf
pl-asr-bigos-v2.py ADDED
@@ -0,0 +1,243 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Build script for the Polish ASR-BIGOS dataset."""
+
+
+ import os
+
+ import datasets
+
+ print("Running script")
+
+ _CITATION = """\
+ @inproceedings{FedCSIS20231609,
+   author={Michał Junczyk},
+   pages={585--590},
+   title={BIGOS - Benchmark Intended Grouping of Open Speech Corpora for Polish Automatic Speech Recognition},
+   booktitle={Proceedings of the 18th Conference on Computer Science and Intelligence Systems},
+   year={2023},
+   editor={Maria Ganzha and Leszek Maciaszek and Marcin Paprzycki and Dominik Ślęzak},
+   publisher={IEEE},
+   doi={10.15439/2023F1609},
+   url={http://dx.doi.org/10.15439/2023F1609},
+   volume={35},
+   series={Annals of Computer Science and Information Systems}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The BIGOS (Benchmark Intended Grouping of Open Speech) dataset aims to simplify access to openly available Polish speech corpora and
+ to enable systematic benchmarking of open and commercial Polish ASR systems.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/michaljunczyk/pl-asr-bigos"
+
+ _LICENSE = "CC-BY-SA-4.0"
+
+ _BIGOS_SUBSETS = [
+     "pjatk-clarin_mobile-15",
+     "pjatk-clarin_studio-15",
+     "fair-mls-20",
+     "mailabs-corpus_librivox-19",
+     "mozilla-common_voice_15-23",
+     "pwr-azon_read-20",
+     "pwr-azon_spont-20",
+     "pwr-maleset-unk",
+     "pwr-shortwords-unk",
+     "pwr-viu-unk",
+     "google-fleurs-22",
+     "polyai-minds14-21",
+ ]
+
+ # Each subset is its own config; "all" loads every subset together.
+ _ALL_CONFIGS = list(_BIGOS_SUBSETS)
+ _ALL_CONFIGS.append("all")
+
+ _BASE_PATH = "data/{subset}/"
+ _DATA_URL = _BASE_PATH + "{split}.tar.gz"
+ _META_URL = _BASE_PATH + "{split}.tsv"
+
+
+ class BigosConfig(datasets.BuilderConfig):
+
+     def __init__(self, name, description, citation, homepage):
+         # Forward the constructor arguments to the parent class;
+         # self.name and self.description are not yet set at this point.
+         super(BigosConfig, self).__init__(
+             name=name,
+             version=datasets.Version("2.0.0", ""),
+             description=description,
+         )
+         self.name = name
+         self.description = description
+         self.citation = citation
+         self.homepage = homepage
+
+
+ def _build_config(name):
+     return BigosConfig(
+         name=name,
+         description=_DESCRIPTION,
+         citation=_CITATION,
+         homepage=_HOMEPAGE,
+     )
+
+
+ class Bigos(datasets.GeneratorBasedBuilder):
+
+     DEFAULT_WRITER_BATCH_SIZE = 2
+     # In case the issue persists, investigate the following:
+     # https://github.com/huggingface/datasets/issues/4057
+     BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS]
+
+     def _info(self):
+         task_templates = None
+         features = datasets.Features(
+             {
+                 "audioname": datasets.Value("string"),
+                 "split": datasets.Value("string"),
+                 "dataset": datasets.Value("string"),
+                 "speaker_id": datasets.Value("string"),
+                 "ref_orig": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate=16_000),
+                 "samplingrate_orig": datasets.Value("int32"),
+                 "sampling_rate": datasets.Value("int32"),
+                 "audiopath_bigos": datasets.Value("string"),
+                 # "ref_spoken": datasets.Value("string"),
+                 # "ref_written": datasets.Value("string"),
+                 # "hyp_whisper_cloud": datasets.Value("string"),
+                 # "hyp_google_default": datasets.Value("string"),
+                 # "hyp_azure_default": datasets.Value("string"),
+                 # "hyp_whisper_tiny": datasets.Value("string"),
+                 # "hyp_whisper_base": datasets.Value("string"),
+                 # "hyp_whisper_small": datasets.Value("string"),
+                 # "hyp_whisper_medium": datasets.Value("string"),
+                 # "hyp_whisper_large": datasets.Value("string"),
+                 # "gender": datasets.ClassLabel(names=["male", "female", "other"]),
+                 # "speaker_id": datasets.Value("int32"),
+                 # "raw_transcription": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=self.config.description + "\n" + _DESCRIPTION,
+             features=features,
+             supervised_keys=("audio", "ref_orig"),
+             homepage=self.config.homepage,
+             citation=self.config.citation + "\n" + _CITATION,
+             task_templates=task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         splits = ["test", "train", "validation"]
+
+         if self.config.name == "all":
+             data_urls = {split: [_DATA_URL.format(subset=subset, split=split) for subset in _BIGOS_SUBSETS] for split in splits}
+             meta_urls = {split: [_META_URL.format(subset=subset, split=split) for subset in _BIGOS_SUBSETS] for split in splits}
+         else:
+             data_urls = {split: [_DATA_URL.format(subset=self.config.name, split=split)] for split in splits}
+             meta_urls = {split: [_META_URL.format(subset=self.config.name, split=split)] for split in splits}
+
+         archive_paths = dl_manager.download(data_urls)
+         local_extracted_archives = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+         archive_iters = {split: [dl_manager.iter_archive(path) for path in paths] for split, paths in archive_paths.items()}
+
+         meta_paths = dl_manager.download(meta_urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "local_extracted_archives": local_extracted_archives.get("test", [None] * len(meta_paths.get("test"))),
+                     "archive_iters": archive_iters.get("test"),
+                     "text_paths": meta_paths.get("test"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "local_extracted_archives": local_extracted_archives.get("train", [None] * len(meta_paths.get("train"))),
+                     "archive_iters": archive_iters.get("train"),
+                     "text_paths": meta_paths.get("train"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "local_extracted_archives": local_extracted_archives.get("validation", [None] * len(meta_paths.get("validation"))),
+                     "archive_iters": archive_iters.get("validation"),
+                     "text_paths": meta_paths.get("validation"),
+                 },
+             ),
+         ]
+
+     def _get_data(self, lines, subset_id):
+         data = {}
+         for line in lines:
+             # Parse one TSV row of the split's metadata file.
+             if isinstance(line, bytes):
+                 line = line.decode("utf-8")
+             (
+                 _id,
+                 split,
+                 dataset,
+                 speaker_id,
+                 sampling_rate_orig,
+                 sampling_rate,
+                 ref_orig,
+                 audio_path_bigos,
+             ) = line.strip().split("\t")
+
+             data[audio_path_bigos] = {
+                 "audioname": _id,
+                 "split": split,
+                 "dataset": dataset,
+                 "speaker_id": speaker_id,
+                 "samplingrate_orig": sampling_rate_orig,
+                 "sampling_rate": sampling_rate,
+                 "ref_orig": ref_orig,
+                 "audiopath_bigos": audio_path_bigos,
+                 # "age": int(age),
+             }
+
+         return data
+
+     def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
+         assert len(local_extracted_archives) == len(archive_iters) == len(text_paths)
+         key = 0
+
+         print("Generating examples")
+         if self.config.name == "all":
+             subsets = _BIGOS_SUBSETS
+         else:
+             subsets = [self.config.name]
+
+         for archive, text_path, local_extracted_path, subset_id in zip(archive_iters, text_paths, local_extracted_archives, subsets):
+             with open(text_path, encoding="utf-8") as f:
+                 lines = f.readlines()
+                 data = self._get_data(lines, subset_id)
+
+             for audio_path, audio_file in archive:
+                 audio_filename = audio_path.split("/")[-1]
+                 # if audio_filename not in data.keys():
+                 #     continue
+                 result = data[audio_filename]
+                 extracted_audio_path = (
+                     os.path.join(local_extracted_path, audio_filename)
+                     if local_extracted_path is not None
+                     else None
+                 )
+                 # result["path"] = extracted_audio_path
+                 result["audio"] = {"path": audio_path, "bytes": audio_file.read()}
+                 yield key, result
+                 key += 1
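
Note: the builder supports both loading modes. When streaming, `_split_generators` skips `dl_manager.extract` and `_generate_examples` reads audio bytes straight from the tar archives via `iter_archive`. A minimal sketch for exercising the streaming path on one subset (assuming the script is run from the repository root; the subset name is one of the configs defined above):

from datasets import load_dataset

# Sketch only: stream one subset's test split without extracting archives.
ds = load_dataset("pl-asr-bigos-v2.py", "pjatk-clarin_studio-15", split="test", streaming=True)
sample = next(iter(ds))
print(sample["audioname"], sample["ref_orig"])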
test.py ADDED
@@ -0,0 +1,54 @@
+ from datasets import load_dataset
+
+ # Test reading all subsets for each split.
+ # Subset names must match the configs defined in the build script.
+ _BIGOS_SUBSETS = ["pjatk-clarin_mobile-15", "pjatk-clarin_studio-15", "fair-mls-20", "mailabs-corpus_librivox-19", "mozilla-common_voice_15-23", "pwr-azon_read-20", "pwr-azon_spont-20", "pwr-maleset-unk", "pwr-shortwords-unk", "pwr-viu-unk", "google-fleurs-22", "polyai-minds14-21"]
+
+ splits = ["test", "validation", "train"]
+ # Refer to the documentation for descriptions of the splits and subsets.
+
+ print("Testing dataset all subsets")
+
+ dataset_name = "pl-asr-bigos-v2"
+ hf_account = "amu-cai"
+
+ hf_db_name = "/".join([hf_account, dataset_name])
+ print(hf_db_name)
+
+ print(f"Testing dataset: {dataset_name} from account {hf_account} for all subsets")
+
+ print("Checking build script locally - test split")
+ dataset_local = load_dataset(f"{dataset_name}.py", "all", split="test")
+
+ print("Checking build script locally - validation split")
+ dataset_local = load_dataset(f"{dataset_name}.py", "all", split="validation")
+
+ print("Checking build script on huggingface.co")
+ dataset_hf = load_dataset(hf_db_name, "all")
+
+ """
+ for split in splits:
+     print("Checking split: ", split)
+     print(dataset[split][0])
+     # TODO - rename to include the date of test set creation in order to check whether adding a new split removes the previous one
+     if split == "test":
+         assert len(dataset["test"]) == 1900
+
+ _BIGOS_SUBSETS = ["clarin-pjatk-mobile-15", "clarin-pjatk-studio-15", "fair-mls-20", "mailabs-19", "mozilla-common-voice-19", "pwr-azon-read-20", "pwr-azon-spont-20", "pwr-maleset-unk", "pwr-shortwords-unk", "pwr-viu-unk"]
+
+ print("Testing specific subsets")
+ for subset in _BIGOS_SUBSETS:
+     dataset = load_dataset('michaljunczyk/pl-asr-bigos', subset)
+     print("subset: ", subset)
+
+     for split in splits:
+         print("Checking split: ", split)
+         print(dataset[split][0])
+         if split == "test":
+             if subset == "pwr-azon-spont-20":
+                 assert len(dataset["test"]) == 100
+             else:
+                 assert len(dataset["test"]) == 200
+     print(dataset)
+
+ # TODO - add more tests for other splits
+ """