holylovenia committed
Commit f4f21c6
1 parent: 6c9e6d0

Upload asr_indocsc.py with huggingface_hub

Files changed (1)
  1. asr_indocsc.py +192 -0
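
The commit message indicates the file was uploaded with the huggingface_hub client. A minimal sketch of such an upload, assuming a logged-in session and a hypothetical repo id (neither is recorded in this commit):

    from huggingface_hub import HfApi

    api = HfApi()  # assumes credentials from `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="asr_indocsc.py",
        path_in_repo="asr_indocsc.py",
        repo_id="user/asr_indocsc",  # hypothetical dataset repo
        repo_type="dataset",
        commit_message="Upload asr_indocsc.py with huggingface_hub",
    )
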
asr_indocsc.py ADDED
@@ -0,0 +1,192 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ # no bibtex citation
+ _CITATION = ""
+ _DATASETNAME = "asr_indocsc"
+ _DESCRIPTION = """\
+ This open-source dataset consists of 4.54 hours of transcribed Indonesian
+ conversational speech on certain topics, comprising seven conversations
+ between two pairs of speakers. Please create an account and log in at
+ https://magichub.com to download the data.
+ """
+
+ _HOMEPAGE = "https://magichub.com/datasets/indonesian-conversational-speech-corpus/"
+ _LANGUAGES = ["ind"]
+ _LICENSE = Licenses.CC_BY_NC_ND_4_0.value
+ _LOCAL = False
+ _URLS = {
+     _DATASETNAME: "https://magichub.com/df/df.php?file_name=Indonesian_Conversational_Speech_Corpus.zip",
+ }
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class ASRIndocscDataset(datasets.GeneratorBasedBuilder):
+ """ASR-Indocsc consists transcribed Indonesian conversational speech on certain topics"""
52
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = "sptext"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "channel": datasets.Value("string"),
+                     "uttrans_id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "topic": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "speaker_gender": datasets.Value("string"),
+                     "speaker_age": datasets.Value("int64"),
+                     "speaker_region": datasets.Value("string"),
+                     "speaker_device": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.speech_text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         data_paths = {
+             _DATASETNAME: Path(dl_manager.download_and_extract(_URLS[_DATASETNAME])),
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_paths[_DATASETNAME],
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         # read AUDIOINFO file
+         # columns: channel, uttrans_id, speaker_id, topic
+         audioinfo_filepath = os.path.join(filepath, "AUDIOINFO.txt")
+         with open(audioinfo_filepath, "r", encoding="utf-8") as audioinfo_file:
+             audioinfo_data = audioinfo_file.readlines()
+         audioinfo_data = audioinfo_data[1:]  # remove header
+         audioinfo_data = [s.strip("\n").split("\t") for s in audioinfo_data]
+
+         # read SPKINFO file
+         # columns: channel, speaker_id, gender, age, region, device
+         spkinfo_filepath = os.path.join(filepath, "SPKINFO.txt")
+         with open(spkinfo_filepath, "r", encoding="utf-8") as spkinfo_file:
+             spkinfo_data = spkinfo_file.readlines()
+         spkinfo_data = spkinfo_data[1:]  # remove header
+         spkinfo_data = [s.strip("\n").split("\t") for s in spkinfo_data]
+         for s in spkinfo_data:
+             if s[2] == "M":
+                 s[2] = "male"
+             elif s[2] == "F":
+                 s[2] = "female"
+             else:
+                 s[2] = None
+         # dictionary of metadata of each speaker
+         spkinfo_dict = {s[1]: {"speaker_gender": s[2], "speaker_age": int(s[3]), "speaker_region": s[4], "speaker_device": s[5]} for s in spkinfo_data}
+
+         num_sample = len(audioinfo_data)
+
+         for i in range(num_sample):
+             # wav file
+             wav_path = os.path.join(filepath, "WAV", audioinfo_data[i][1])
+             # transcription file
+             transcription_path = os.path.join(filepath, "TXT", os.path.splitext(audioinfo_data[i][1])[0] + ".txt")
+             with open(transcription_path, "r", encoding="utf-8") as transcription_file:
+                 transcription = transcription_file.readlines()
+             # remove redundant speaker info from transcription file
+             transcription = [s.strip("\n").split("\t") for s in transcription]
+             transcription = [s[-1] for s in transcription]
+             text = " \n ".join(transcription)
+
+             if self.config.schema == "source":
+                 example = {
+ "id": audioinfo_data[i][1].strip(".wav"),
170
+ "channel": audioinfo_data[i][0],
171
+ "uttrans_id": audioinfo_data[i][1],
172
+ "speaker_id": audioinfo_data[i][2],
173
+ "topic": audioinfo_data[i][3],
174
+ "text": text,
175
+ "path": wav_path,
176
+ "audio": wav_path,
177
+ "speaker_gender": spkinfo_dict[audioinfo_data[i][2]]["speaker_gender"],
178
+ "speaker_age": spkinfo_dict[audioinfo_data[i][2]]["speaker_age"],
179
+ "speaker_region": spkinfo_dict[audioinfo_data[i][2]]["speaker_region"],
180
+ "speaker_device": spkinfo_dict[audioinfo_data[i][2]]["speaker_device"],
181
+ }
182
+ elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
183
+ example = {
184
+ "id": audioinfo_data[i][1].strip(".wav"),
185
+ "speaker_id": audioinfo_data[i][2],
186
+ "path": wav_path,
187
+ "audio": wav_path,
188
+ "text": text,
189
+ "metadata": {"speaker_age": spkinfo_dict[audioinfo_data[i][2]]["speaker_age"], "speaker_gender": spkinfo_dict[audioinfo_data[i][2]]["speaker_gender"]},
190
+ }
191
+
192
+ yield i, example
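
Once the script is fetched, it can be exercised locally with the datasets library. A minimal sketch, assuming a local copy of asr_indocsc.py and an installed seacrowd package (the script imports seacrowd.utils); the config name comes from BUILDER_CONFIGS above:

    import datasets

    # "asr_indocsc_source" is the default config; recent datasets versions
    # require trust_remote_code=True to run a local loading script
    ds = datasets.load_dataset(
        "asr_indocsc.py",
        name="asr_indocsc_source",
        split="train",
        trust_remote_code=True,
    )
    print(ds[0]["id"], ds[0]["text"][:80])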