import os
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{widiaputri-etal-5641,
  author = {Widiaputri, Ruhiyah Faradishi and Purwarianti, Ayu and Lestari, Dessi Puji and Azizah, Kurniawati and Tanaya, Dipta and Sakti, Sakriani},
  title = {Speech Recognition and Meaning Interpretation: Towards Disambiguation of Structurally Ambiguous Spoken Utterances in Indonesian},
  booktitle = {Proceedings of EMNLP 2023},
  year = {2023}
}
"""

_DATASETNAME = "struct_amb_ind"

_DESCRIPTION = """
This dataset is the first Indonesian speech dataset for structurally ambiguous utterances, pairing each utterance with a transcription and two disambiguation texts.
The structurally ambiguous sentences were adapted from Types 4, 5, 6, and 10 of the types of syntactic ambiguity in English described by [Taha et al., 1983].
For each chosen type, 100 structurally ambiguous Indonesian sentences were collected through crowdsourcing.
Each ambiguous sentence has two possible interpretations, yielding two disambiguation texts per sentence.
Each disambiguation text consists of two sentences. All sentences have been checked by linguists.
"""

_HOMEPAGE = "https://github.com/ha3ci-lab/struct_amb_ind"

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = True  # the audio data must be obtained separately from https://drive.google.com/drive/folders/1QeaptstBgwGYO6THGkZHHViExrogCMUj
_LANGUAGES = ["ind"]

_URL_TEMPLATES = {
    "keys": "https://raw.githubusercontent.com/ha3ci-lab/struct_amb_ind/main/keys/train_dev_test_spk_keys/",
    "text": "https://raw.githubusercontent.com/ha3ci-lab/struct_amb_ind/main/text/",
}

_URLS = {
    "split_train": _URL_TEMPLATES["keys"] + "train_spk",
    "split_dev": _URL_TEMPLATES["keys"] + "dev_spk",
    "split_test": _URL_TEMPLATES["keys"] + "test_spk",
    "text_transcript": _URL_TEMPLATES["text"] + "ID_amb_disam_transcript.txt",
}
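
# The three "split_*" files list the speaker ids assigned to each split, one id
# per line; "text_transcript" maps each utterance id to its ambiguous transcript
# and its disambiguation text, separated by "|" (parsed in _generate_examples).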

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class StructAmbInd(datasets.GeneratorBasedBuilder):
    """
    This dataset contains the first Indonesian speech dataset for structurally ambiguous utterances and each of transcription and two disambiguation texts.
    This dataloader does NOT contain the additional training data for as mentioned in the _HOMEPAGE, as it is already implemented in the dataloader "indspeech_news_lvcsr".
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_sptext",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "amb_transcript": datasets.Value("string"),
                    "disam_text": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # The data_dir configuration is required ONLY for locating the local audio archives;
        # the split keys and the transcript are downloaded from GitHub.
        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
        data_dir = self.config.data_dir

        # load the local audio folders
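        # Expected local layout (an assumption, matching the naming below): data_dir
        # contains one zip per speaker, F01.zip ... F11.zip and M01.zip ... M11.zip,
        # each extracting to a folder named after the speaker (e.g. F01/).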
        audio_urls = [f"{data_dir}/{gender}{_id:02}.zip" for gender in ["F", "M"] for _id in range(1, 12)]
        audio_files_dir = [Path(dl_manager.extract(audio_url)) / audio_url.split("/")[-1][:-4] for audio_url in audio_urls]
        # load the speaker splits and transcript
        split_train = Path(dl_manager.download(_URLS["split_train"]))
        split_dev = Path(dl_manager.download(_URLS["split_dev"]))
        split_test = Path(dl_manager.download(_URLS["split_test"]))
        text_transcript = Path(dl_manager.download(_URLS["text_transcript"]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": split_train, "transcript": text_transcript, "audio_files_dir": audio_files_dir},
            ),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split": split_dev, "transcript": text_transcript, "audio_files_dir": audio_files_dir}),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": split_test, "transcript": text_transcript, "audio_files_dir": audio_files_dir},
            ),
        ]

    def _generate_examples(self, split: Path, transcript: Path, audio_files_dir: List[Path]) -> Iterator[Tuple[int, Dict]]:
        # Keep only the audio folders of the speakers assigned to this split.
        with open(split, "r") as f:
            speaker_ids = [line.rstrip("\n") for line in f]
        speech_folders = [audio_folder for audio_folder in audio_files_dir if audio_folder.name in speaker_ids]
        speech_files = [f"{speech_folder}/{filename}" for speech_folder in speech_folders for filename in os.listdir(speech_folder)]

        # Build a lookup from utterance id to its ambiguous transcript and disambiguation text.
        with open(transcript, "r") as f:
            rows = [line.rstrip("\n").split("|") for line in f]
        transcript_dict = {row[0]: {"amb_transcript": row[1], "disam_text": row[2]} for row in rows}

        for key, aud_file in enumerate(speech_files):
            # The utterance id is the filename without its 4-character extension (e.g. ".wav");
            # its second "_"-separated field is the speaker id.
            aud_id = aud_file.split("/")[-1][:-4]
            aud_info = aud_id.split("_")
            if self.config.schema == "source":
                row = {
                    "id": aud_id,
                    "speaker_id": aud_info[1],
                    "path": aud_file,
                    "audio": aud_file,
                    "amb_transcript": transcript_dict[aud_id]["amb_transcript"],
                    "disam_text": transcript_dict[aud_id]["disam_text"],
                }
                yield key, row
            elif self.config.schema == "seacrowd_sptext":
                row = {
                    "id": aud_id,
                    "path": aud_file,
                    "audio": aud_file,
                    "text": transcript_dict[aud_id]["amb_transcript"],
                    "speaker_id": aud_info[1],
                    "metadata": {
                        "speaker_age": None,
                        "speaker_gender": aud_info[1][0],
                    },
                }
                yield key, row
            else:
                raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
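

# Minimal usage sketch (illustration only, not part of the SEACrowd loader). It
# assumes the audio zips from the Google Drive folder referenced above were
# downloaded to a local directory (placeholder path below) and a `datasets`
# version that still supports loading local dataset scripts.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        __file__,
        name=f"{_DATASETNAME}_source",
        data_dir="/path/to/struct_amb_ind_audio",  # placeholder
    )
    print(dataset["train"][0]["amb_transcript"])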