gttsehu committed on
Commit
1620c26
1 Parent(s): c4124a6

Initial release

.gitattributes CHANGED
@@ -56,3 +56,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ metadata/eval.tsv filter=lfs diff=lfs merge=lfs -text
+ audio/eval_0.tar filter=lfs diff=lfs merge=lfs -text
Albayzin-2024-BBS-S2T-eval.py ADDED
@@ -0,0 +1,173 @@
+ # Copyright 2023 GTTS (http://gtts.ehu.eus)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Albayzin 2024 Bilingual Basque-Spanish Speech to Text Challenge (BBS-S2TC) evaluation dataset."""
+
+
+ import csv
+ import os
+
+ import datasets
+ from datasets.utils.py_utils import size_str
+ from tqdm import tqdm
+
+ from .languages import LANGUAGES
+ from .release_stats import STATS
+
+
+ _CITATION = """\
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/gttsehu/Albayzin-2024-BBS-S2T-eval"
+
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+
+ _DESCRIPTION = (
+     "blah blah blah..."
+     "blah blah blah..."
+     "blah blah blah..."
+ )
+
+ # Raw files are fetched through the repository's resolve/main endpoint
+ # (the trailing slash is required by the URL templates below).
+ _BASE_URL = "https://huggingface.co/datasets/gttsehu/Albayzin-2024-BBS-S2T-eval/resolve/main/"
+
+ _AUDIO_URL = _BASE_URL + "audio/{split}_{shard_idx}.tar"
+ _METADATA_URL = _BASE_URL + "metadata/{split}.tsv"
+
+
+ class Albayzin2024BBSS2TEvalConfig(datasets.BuilderConfig):
+     """BuilderConfig for Albayzin2024BBSS2TEval."""
+
+     def __init__(self, name, version, **kwargs):
+         self.language = kwargs.pop("language", None)
+         self.release_date = kwargs.pop("release_date", None)
+         self.num_clips = kwargs.pop("num_clips", None)
+         self.num_speakers = kwargs.pop("num_speakers", None)
+         self.validated_hr = kwargs.pop("validated_hr", None)
+         self.total_hr = kwargs.pop("total_hr", None)
+         self.size_bytes = kwargs.pop("size_bytes", None)
+         self.size_human = size_str(self.size_bytes)
+
+         super(Albayzin2024BBSS2TEvalConfig, self).__init__(
+             name=name,
+             version=datasets.Version(version),
+             description=_DESCRIPTION,
+             **kwargs,
+         )
+
+
+ class Albayzin2024BBSS2TEval(datasets.GeneratorBasedBuilder):
+     """Evaluation corpus for the Albayzin 2024 Bilingual Basque-Spanish Speech to Text Challenge (BBS-S2TC)."""
+
+     DEFAULT_CONFIG_NAME = "all"
+
+     # One configuration per locale ("es", "eu", "bi", "all"), built from the release statistics.
+     BUILDER_CONFIGS = [
+         Albayzin2024BBSS2TEvalConfig(
+             name=lang,
+             version=STATS["version"],
+             language=LANGUAGES[lang],
+             release_date=STATS["date"],
+             num_clips=lang_stats["clips"],
+             num_speakers=lang_stats["users"],
+             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
+             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
+         )
+         for lang, lang_stats in STATS["locales"].items()
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "path": datasets.Value("string"),
+                 "audio": datasets.features.Audio(sampling_rate=16_000),
+                 "sentence": datasets.Value("string"),
+                 "speaker_id": datasets.Value("string"),
+                 "language": datasets.Value("string"),
+                 "PRR": datasets.Value("float32"),
+                 "length": datasets.Value("float32"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             version=self.config.version,
+         )
+
+     def _split_generators(self, dl_manager):
+         lang = self.config.name
+
+         # Download the audio tar shards listed in the release statistics.
+         audio_urls = {}
+         splits = ("eval",)
+         for split in splits:
+             audio_urls[split] = [
+                 _AUDIO_URL.format(split=split, shard_idx=i) for i in range(STATS["n_shards"][split])
+             ]
+         archive_paths = dl_manager.download(audio_urls)
+         # In non-streaming mode the archives are extracted so that examples can point to local files.
+         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+         metadata_urls = {split: _METADATA_URL.format(lang=lang, split=split) for split in splits}
+         metadata_paths = dl_manager.download_and_extract(metadata_urls)
+
+         split_generators = []
+         split_names = {
+             "eval": datasets.Split.TEST,
+         }
+         for split in splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=split_names.get(split, split),
+                     gen_kwargs={
+                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                         "metadata_path": metadata_paths[split],
+                     },
+                 ),
+             )
+
+         return split_generators
+
+     def _generate_examples(self, local_extracted_archive_paths, archives, metadata_path):
+         lang = self.config.name
+
+         # Index the TSV metadata by audio path so archive members can be matched to their transcripts.
+         with open(metadata_path, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+             metadata = {row["path"]: row for row in tqdm(reader, desc="Reading metadata...")}
+
+         excluded = 0
+         for i, audio_archive in enumerate(archives):
+             for path, file in audio_archive:
+                 if path not in metadata:
+                     excluded += 1
+                     continue
+                 result = dict(metadata[path])
+                 if lang == "all" or lang == result["language"]:
+                     # Set the audio feature and the path to the extracted file.
+                     path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
+                     result["audio"] = {"path": path, "bytes": file.read()}
+                     result["path"] = path
+                     yield path, result
+         print(excluded, "audio files not found in metadata")
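
For reference, a minimal usage sketch (not part of this commit) of how the script above would typically be consumed. It assumes the repository id gttsehu/Albayzin-2024-BBS-S2T-eval, the configuration names defined in languages.py ("es", "eu", "bi", "all"), and the mapping of the "eval" split to "test" in _split_generators; depending on the installed datasets version, running repository scripts may require trust_remote_code=True.

    # Usage sketch (assumptions noted above; not part of the commit).
    from datasets import load_dataset

    ds = load_dataset(
        "gttsehu/Albayzin-2024-BBS-S2T-eval",
        "all",               # or "eu", "es", "bi" (see languages.py)
        split="test",        # the "eval" split is exposed as TEST
        trust_remote_code=True,
    )

    example = ds[0]
    print(example["path"], example["language"], example["sentence"])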
audio/eval_0.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ea0011fe18bebeeb3c688fec478c350058558b8b18127c5c4c8bd3c3ebd0ed6
+ size 272844800
languages.py ADDED
@@ -0,0 +1,6 @@
+ LANGUAGES = {
+     "eu": "Basque (Euskara)",
+     "es": "Spanish (Español)",
+     "bi": "Mixed Basque + Spanish (Euskara + Español)",
+     "all": "All",
+ }
metadata/eval.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:027aacb76eafdeeb7bb7cfb9bd1e5066784b217faba35c7c9296484f5f05b395
+ size 389494
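
The TSV itself is stored in LFS, so its columns are not visible in this diff; the sketch below shows how the loading script indexes it, with the column names assumed from the Features declared in Albayzin-2024-BBS-S2T-eval.py rather than read from the file.

    # Sketch: index the metadata by audio path, mirroring _generate_examples.
    # Assumed columns (from the script's Features): path, sentence, speaker_id, language, PRR, length.
    import csv

    with open("metadata/eval.tsv", encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
        metadata = {row["path"]: row for row in reader}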
release_stats.py ADDED
@@ -0,0 +1,17 @@
+ STATS = {
+     "name": "Albayzin2024BBSS2TEval",
+     "version": "1.0.0",
+     "date": "2024-09-09",
+     "years": (2024,),
+     "locales": {
+         "es": {"reportedSentences": 1, "duration": 1, "clips": 1, "users": 1, "size": 1, "avgDurationSecs": 1, "totalHrs": 1},
+         "eu": {"reportedSentences": 1, "duration": 1, "clips": 1, "users": 1, "size": 1, "avgDurationSecs": 1, "totalHrs": 1},
+         "bi": {"reportedSentences": 1, "duration": 1, "clips": 1, "users": 1, "size": 1, "avgDurationSecs": 1, "totalHrs": 1},
+         "all": {"reportedSentences": 1, "duration": 1, "clips": 1, "users": 1, "size": 1, "avgDurationSecs": 1, "totalHrs": 1},
+     },
+     "n_shards": {
+         "eval": 1,
+     },
+     "totalDuration": 1,
+     "totalHrs": 1,
+ }
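
For context, a small sketch of how these placeholder statistics feed the loading script: each entry in "locales" becomes one BuilderConfig, and "n_shards" determines how many audio tar URLs are requested in _split_generators. The URL template mirrors _AUDIO_URL; the resolve/main prefix is an assumption about where the raw files are served.

    # Sketch: consuming release_stats.STATS the way the loading script does.
    from release_stats import STATS

    _AUDIO_URL = (
        "https://huggingface.co/datasets/gttsehu/Albayzin-2024-BBS-S2T-eval/"
        "resolve/main/audio/{split}_{shard_idx}.tar"
    )

    # One shard -> a single URL ending in audio/eval_0.tar, matching the tar added in this commit.
    eval_shards = [
        _AUDIO_URL.format(split="eval", shard_idx=i)
        for i in range(STATS["n_shards"]["eval"])
    ]
    print(eval_shards)

    # One BuilderConfig per locale key: ["es", "eu", "bi", "all"].
    print(list(STATS["locales"]))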