Languages: Sundanese
Tags: speech-recognition
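"""SEACrowd dataloader for su_id_asr, a Sundanese automatic speech recognition corpus collected by Google in Indonesia."""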
import csv
import os
from typing import List
import datasets
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
DEFAULT_SOURCE_VIEW_NAME, Tasks)
_DATASETNAME = "su_id_asr"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
_LANGUAGES = ["sun"]
_LOCAL = False
_CITATION = """\
@inproceedings{sodimana18_sltu,
author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
year=2018,
booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
pages={66--70},
doi={10.21437/SLTU.2018-14}
}
"""
_DESCRIPTION = """\
Sundanese ASR training dataset containing ~220K utterances.
This dataset was collected by Google in Indonesia.
"""
_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
_URLs = {
"su_id_asr_train": "https://univindonesia-my.sharepoint.com/:u:/g/personal/bimasena_putra_office_ui_ac_id/EdJMdFZbSp5LlAT0TEP2fvcB38OHB1hIRslTpCs-wTrJMA?e=jfK6xC&download=1",
"su_id_asr_dev": "https://univindonesia-my.sharepoint.com/:u:/g/personal/bimasena_putra_office_ui_ac_id/Efe8LnwT8KtOjJybXjOQdFwBso5RBp39SwGGWsEbindXDQ?e=IFIN6J&download=1",
"su_id_asr_test": "https://univindonesia-my.sharepoint.com/:u:/g/personal/bimasena_putra_office_ui_ac_id/EfjvnrniV_hKmrSMY0XYvt8BXiXx5SNxt5mhfLiMw0dExw?e=zGCjc5&download=1",
}
_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
class SuIdASR(datasets.GeneratorBasedBuilder):
"""su_id contains ~220K utterances for Sundanese ASR training data."""
BUILDER_CONFIGS = [
SEACrowdConfig(
name="su_id_asr_source",
version=datasets.Version(_SOURCE_VERSION),
description="SU_ID_ASR source schema",
schema="source",
subset_id="su_id_asr",
),
SEACrowdConfig(
name="su_id_asr_seacrowd_sptext",
version=datasets.Version(_SEACROWD_VERSION),
description="SU_ID_ASR Nusantara schema",
schema="seacrowd_sptext",
subset_id="su_id_asr",
),
]
DEFAULT_CONFIG_NAME = "su_id_asr_source"
def _info(self):
if self.config.schema == "source":
features = datasets.Features(
{
"id": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"path": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16_000),
"text": datasets.Value("string"),
}
)
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
)
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
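        # Each split ships as a separate archive; download and extract each one independently.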
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_train"])},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_dev"])},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_test"])},
)
]
def _generate_examples(self, filepath: str):
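        # The extracted archive contains `asr_sundanese/utt_spk_text.tsv`, a tab-separated
        # manifest with three columns: utterance id, speaker id, and transcription.
        # Audio is stored at `asr_sundanese/data/<first two chars of id>/<id>.flac`.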
if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
tsv_file = os.path.join(filepath, "asr_sundanese", "utt_spk_text.tsv")
with open(tsv_file, "r") as file:
tsv_file = csv.reader(file, delimiter="\t")
for line in tsv_file:
audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
                    wav_path = os.path.join(filepath, "asr_sundanese", "data", audio_id[:2], f"{audio_id}.flac")
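                    # Skip manifest rows whose audio file is missing from the archive.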
if os.path.exists(wav_path):
if self.config.schema == "source":
ex = {
"id": audio_id,
"speaker_id": speaker_id,
"path": wav_path,
"audio": wav_path,
"text": transcription_text,
}
yield audio_id, ex
elif self.config.schema == "seacrowd_sptext":
ex = {
"id": audio_id,
"speaker_id": speaker_id,
"path": wav_path,
"audio": wav_path,
"text": transcription_text,
"metadata": {
"speaker_age": None,
"speaker_gender": None,
},
}
yield audio_id, ex
else:
            raise ValueError(f"Invalid config: {self.config.name}")
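# Example usage (an illustrative sketch, assuming this script is saved locally as
# `su_id_asr.py` and a `datasets` version that still supports script-based loading):
#
#     import datasets
#
#     ds = datasets.load_dataset("su_id_asr.py", name="su_id_asr_source")
#     print(ds["train"][0]["text"])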