Languages: Iban
Commit 1830e50 (1 parent: 2f8675a)
holylovenia committed: Upload asr_ibsc.py with huggingface_hub

Files changed (1)
  asr_ibsc.py: +190 -0
asr_ibsc.py ADDED
@@ -0,0 +1,190 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import fsspec
import pandas as pd
from fsspec.callbacks import TqdmCallback

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (SCHEMA_TO_FEATURES, TASK_TO_SCHEMA,
                                      Licenses, Tasks)

_CITATION = """\
@inproceedings{Juan14,
  Title = {Semi-supervised G2P bootstrapping and its application to ASR for a very under-resourced language: Iban},
  Author = {Sarah Samson Juan and Laurent Besacier and Solange Rossato},
  Booktitle = {Proceedings of Workshop for Spoken Language Technology for Under-resourced (SLTU)},
  Month = {May},
  Year = {2014}}

@inproceedings{Juan2015,
  Title = {Using Resources from a closely-Related language to develop ASR for a very under-resourced Language: A case study for Iban},
  Author = {Sarah Samson Juan and Laurent Besacier and Benjamin Lecouteux and Mohamed Dyab},
  Booktitle = {Proceedings of INTERSPEECH},
  Address = {Dresden, Germany},
  Month = {September},
  Year = {2015}}
"""

_DATASETNAME = "asr_ibsc"

_DESCRIPTION = """\
This package contains Iban language text and speech suitable for Automatic
Speech Recognition (ASR) experiments. In addition to the transcribed speech, a
2M-token text corpus crawled from an online newspaper site is provided. News
data was provided by a local radio station in Sarawak, Malaysia.
"""

_HOMEPAGE = "https://github.com/sarahjuan/iban"

_LANGUAGES = ["iba"]

_LICENSE = Licenses.CC_BY_SA_3_0.value

_LOCAL = False

_URL = "https://github.com/sarahjuan/iban/tree/master/data"

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
_SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # sptext

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class ASRIbanDataset(datasets.GeneratorBasedBuilder):
    """Iban language text and speech suitable for ASR experiments."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{_SEACROWD_SCHEMA}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=_SEACROWD_SCHEMA,
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
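
    # The configs above resolve to the names "asr_ibsc_source" and
    # "asr_ibsc_seacrowd_sptext" (given the sptext schema noted above); either
    # name can be passed as `name=` to `datasets.load_dataset`.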

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "transcription": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA:
            features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]  # speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # prepare the local data directory
        data_dir = Path.cwd() / "data" / "asr_ibsc"
        data_dir.mkdir(parents=True, exist_ok=True)

        # download the data from GitHub via fsspec;
        # if rate limiting is an issue, pass a GitHub username and token
        username = None
        token = None
        fs = fsspec.filesystem("github", org="sarahjuan", repo="iban", ref="master", username=username, token=token)
        fs.clear_instance_cache()

        # download annotations
        print("Downloading annotation...")
        fs.get(fs.ls("data/train/"), (data_dir / "train").as_posix(), recursive=True)
        fs.get(fs.ls("data/test/"), (data_dir / "test").as_posix(), recursive=True)

        # download audio files, one speaker folder at a time
        print("Downloading audio files (~1GB). It may take several minutes...")
        wav_folders = fs.ls("data/wav/")
        for idx, folder in enumerate(wav_folders):
            folder_name = folder.split("/")[-1]
            pbar = TqdmCallback(tqdm_kwargs={"desc": f"-> {folder_name} [{idx + 1:2d}/{len(wav_folders)}]", "unit": "file"})
            fs.get(fs.ls(f"data/wav/{folder_name}/"), (data_dir / "wav" / folder_name).as_posix(), recursive=True, callback=pbar)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "test",
                },
            ),
        ]
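
    # Sketch of the Kaldi-style annotation files parsed below (the field
    # values shown are hypothetical, not taken from the actual corpus):
    #   <split>_text     -> "utt001 nama berita nuan"        (utt_id, space, transcript)
    #   <split>_utt2spk  -> "utt001<TAB>speaker01"           (utt_id, tab, speaker)
    #   <split>_wav.scp  -> "utt001<TAB>path/to/utt001.wav"  (utt_id, tab, wav path)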

    def _generate_examples(self, data_dir: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        text_file = data_dir / split / f"{split}_text"
        utt2spk_file = data_dir / split / f"{split}_utt2spk"
        wav_scp_file = data_dir / split / f"{split}_wav.scp"

        # load the data; transcriptions contain spaces, so split each line of
        # the text file on the first space only rather than on every space
        with open(text_file, encoding="utf-8") as f:
            rows = [line.rstrip("\n").split(" ", 1) for line in f if line.strip()]
        text_df = pd.DataFrame(rows, columns=["utt_id", "text"])
        utt2spk_df = pd.read_csv(utt2spk_file, sep="\t", header=None, names=["utt_id", "speaker"])
        wav_df = pd.read_csv(wav_scp_file, sep="\t", header=None, names=["utt_id", "wav_path"])
        merged_df = pd.merge(text_df, utt2spk_df, on="utt_id")
        merged_df = pd.merge(merged_df, wav_df, on="utt_id")

        for _, row in merged_df.iterrows():
            wav_file = data_dir / "wav" / row["speaker"] / row["wav_path"].split("/")[-1]

            if self.config.schema == "source":
                yield row["utt_id"], {
                    "audio": wav_file.as_posix(),
                    "transcription": row["text"],
                    "speaker_id": row["speaker"],
                }
            elif self.config.schema == _SEACROWD_SCHEMA:
                yield row["utt_id"], {
                    "id": row["utt_id"],
                    "path": str(wav_file),
                    "audio": wav_file.as_posix(),
                    "text": row["text"],
                    "speaker_id": row["speaker"],
                    "metadata": None,
                }
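

# Minimal usage sketch (not part of the original commit): load the default
# source config with the `datasets` library. Assumes this file is saved
# locally as asr_ibsc.py and the seacrowd package is importable; newer
# versions of `datasets` may also require trust_remote_code=True.
if __name__ == "__main__":
    dset = datasets.load_dataset("asr_ibsc.py", name="asr_ibsc_source", split="test")
    sample = dset[0]
    print(sample["speaker_id"], sample["transcription"])
    print(sample["audio"]["sampling_rate"])  # 16000, per the Audio feature above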