holylovenia commited on
Commit
e026f32
1 Parent(s): 564be85

Upload su_id_asr.py with huggingface_hub

Browse files
Files changed (1)
  1. su_id_asr.py +149 -0
su_id_asr.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import csv
import os
from typing import Dict, List

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

# Canonical dataset name and the two view names used by the NusaCrowd framework.
_DATASETNAME = "su_id_asr"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

# ISO 639-3 language code for Sundanese; the data is publicly downloadable.
_LANGUAGES = ["sun"]
_LOCAL = False

_CITATION = """\
@inproceedings{sodimana18_sltu,
author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
year=2018,
booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
pages={66--70},
doi={10.21437/SLTU.2018-14}
}
"""

_DESCRIPTION = """\
Sundanese ASR training data set containing ~220K utterances.
This dataset was collected by Google in Indonesia.


"""

_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"

_LICENSE = "Attribution-ShareAlike 4.0 International."

# Download URL template; the placeholder is filled with a shard id (0-9, a-f).
_URLs = {
    "su_id_asr": "https://www.openslr.org/resources/36/asr_sundanese_{}.zip",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"
class SuIdASR(datasets.GeneratorBasedBuilder):
    """su_id contains ~220K utterances for Sundanese ASR training data."""

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="su_id_asr_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="SU_ID_ASR source schema",
            schema="source",
            subset_id="su_id_asr",
        ),
        NusantaraConfig(
            name="su_id_asr_nusantara_sptext",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="SU_ID_ASR Nusantara schema",
            schema="nusantara_sptext",
            subset_id="su_id_asr",
        ),
    ]

    DEFAULT_CONFIG_NAME = "su_id_asr_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_sptext":
            features = schemas.speech_text_features
        else:
            # Fail fast instead of hitting UnboundLocalError on `features` below.
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download all 16 archive shards and expose a single TRAIN split.

        The corpus is sharded by the first hex digit of the utterance id,
        so the shard ids are 0-9 followed by a-f.
        """
        shard_ids = [str(digit) for digit in range(10)] + ["a", "b", "c", "d", "e", "f"]
        base_path = {shard: dl_manager.download_and_extract(_URLs["su_id_asr"].format(shard)) for shard in shard_ids}
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": base_path},
            ),
        ]

    def _generate_examples(self, filepath: Dict):
        """Yield (utterance_id, example) pairs from every downloaded shard.

        Each shard contains a tab-separated index file mapping
        utterance id -> speaker id -> transcription; rows whose FLAC audio
        file is missing on disk are skipped.
        """
        if self.config.schema not in ("source", "nusantara_sptext"):
            raise ValueError(f"Invalid config: {self.config.name}")

        for shard_path in filepath.values():
            tsv_path = os.path.join(shard_path, "asr_sundanese", "utt_spk_text.tsv")
            with open(tsv_path, "r", encoding="utf-8") as tsv_file:
                for row in csv.reader(tsv_file, delimiter="\t"):
                    audio_id, speaker_id, transcription_text = row[0], row[1], row[2]
                    # Audio files are bucketed by the first two characters of the id.
                    wav_path = os.path.join(shard_path, "asr_sundanese", "data", audio_id[:2], "{}.flac".format(audio_id))
                    if not os.path.exists(wav_path):
                        continue
                    ex = {
                        "id": audio_id,
                        "speaker_id": speaker_id,
                        "path": wav_path,
                        "audio": wav_path,
                        "text": transcription_text,
                    }
                    if self.config.schema == "nusantara_sptext":
                        # Age/gender are not provided by this corpus.
                        ex["metadata"] = {
                            "speaker_age": None,
                            "speaker_gender": None,
                        }
                    yield audio_id, ex