File size: 5,114 Bytes
40c4f19
 
bcbb5b0
 
40c4f19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
00f96ff
 
40c4f19
 
 
 
 
 
4c7bd43
40c4f19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
00f96ff
 
 
40c4f19
 
 
 
 
 
 
 
 
00f96ff
 
 
 
 
 
 
40c4f19
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
# coding=utf-8
# Copyright 2022 Artem Ploujnikov (HuggingFace adaptation only)
# 
# Original dataset: https://github.com/soerenab/AudioMNIST
#
# If you use this dataset, please cite the following article:
# @ARTICLE{becker2018interpreting,
#   author    = {Becker, S\"oren and Ackermann, Marcel and Lapuschkin, Sebastian and M\"uller, Klaus-Robert and Samek, Wojciech},
#   title     = {Interpreting and Explaining Deep Neural Networks for Classification of Audio Signals},
#   journal   = {CoRR},
#   volume    = {abs/1807.03418},
#   year      = {2018},
#   archivePrefix = {arXiv},
#   eprint    = {1807.03418},
# }


# Lint as: python3
import json
import logging
import re
import os

import datasets

from glob import glob

logger = logging.getLogger(__name__)

_DESCRIPTION = """\
AudioMNIST, a research baseline dataset 
"""

_BASE_URL = "https://huggingface.co/datasets/flexthink/audiomnist/resolve/main"
_HOMEPAGE_URL = "https://huggingface.co/datasets/flexthink/audiomnist"
_SPLITS = ["train", "valid", "test"]
_GENDERS = ["female", "male"]
_ACCENTS = [
    "Arabic",
    "Brasilian",
    "Chinese",
    "Danish",
    "English",
    "French",
    "German",
    "Italian",
    "Levant",
    "Madras",
    "South African",
    "South Korean",
    "Spanish",
    "Tamil",
]
_SAMPLING_RATE = 48000

_ACCENT_MAP = {
    "german": "German",
    "Egyptian_American?": "Arabic",
    "German/Spanish": "German",
}

_META_FILE = "audioMNIST_meta.json"
_RE_FILE_NAME = re.compile("(?P<digit>\d+)_(?P<speaker_id>\d+)_(?P<sample_idx>\d+).wav")


class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
    """Dataset builder for AudioMNIST: recordings of spoken digits (0-9)
    annotated with speaker metadata (age, gender, accent, native-speaker
    flag, origin).

    NOTE(review): the class name looks like a leftover from a
    grapheme-to-phoneme loader; it is kept unchanged so existing callers
    keep working.
    """

    def __init__(self, base_url=None, splits=None, *args, **kwargs):
        """
        Arguments
        ---------
        base_url : str, optional
            Root URL for the archives; defaults to the official hub repo.
        splits : list, optional
            Splits to build; defaults to ["train", "valid", "test"].
        """
        super().__init__(*args, **kwargs)
        self.base_url = base_url or _BASE_URL
        self.splits = splits or _SPLITS

    def _info(self):
        """Return the dataset metadata (features, description, homepage)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file_name": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=_SAMPLING_RATE),
                    "speaker_id": datasets.Value("string"),
                    "age": datasets.Value("int8"),
                    "gender": datasets.ClassLabel(names=_GENDERS),
                    "accent": datasets.ClassLabel(names=_ACCENTS),
                    "native_speaker": datasets.Value("bool"),
                    "origin": datasets.Value("string"),
                    "digit": datasets.Value("int8"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
        )

    def _get_url(self, split):
        """Return the download URL of the archive for the given split."""
        return f"{self.base_url}/dataset/{split}.tar.gz"

    def _get_meta_url(self):
        """Return the download URL of the speaker metadata file."""
        return f"{self.base_url}/meta/{_META_FILE}"

    def _split_generator(self, dl_manager, split):
        """Download/extract one split and build its SplitGenerator."""
        archive_url = self._get_url(split)
        archive_path = dl_manager.download_and_extract(archive_url)
        meta_url = self._get_meta_url()
        meta_file = dl_manager.download(meta_url)
        speaker_map = self._get_speaker_map(meta_file)
        return datasets.SplitGenerator(
            name=split,
            gen_kwargs={
                "archive_path": archive_path,
                "speaker_map": speaker_map,
            },
        )

    def _get_speaker_map(self, file_name):
        """Load the speaker metadata JSON, normalizing accent labels.

        Arguments
        ---------
        file_name : str
            Path to the downloaded audioMNIST_meta.json file.

        Returns
        -------
        dict
            speaker_id -> raw metadata dict, with accent values collapsed
            onto the canonical labels via _ACCENT_MAP.
        """
        with open(file_name) as speaker_file:
            result = json.load(speaker_file)
        for entry in result.values():
            entry["accent"] = _ACCENT_MAP.get(
                entry["accent"], entry["accent"])
        return result

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per configured split."""
        return [self._split_generator(dl_manager, split) for split in self.splits]

    def _map_speaker_info(self, speaker_info):
        """Convert a raw metadata entry into the feature schema.

        The raw "native speaker" field ("yes"/"no") becomes the boolean
        "native_speaker"; recording date/room are dropped because they are
        not part of the declared features.
        """
        result = dict(speaker_info)
        result["native_speaker"] = speaker_info["native speaker"] == "yes"
        del result["native speaker"]
        del result["recordingdate"]
        del result["recordingroom"]
        return result

    def _generate_examples(self, archive_path, speaker_map):
        """Yield (key, example) pairs for every WAV file in the archive.

        Arguments
        ---------
        archive_path : str
            Root of the extracted split archive.
        speaker_map : dict
            speaker_id -> metadata, as produced by _get_speaker_map.
        """
        # recursive=True is required for "**" to match nested directories;
        # without it, "**" degrades to a single-level "*".
        wav_files = glob(
            os.path.join(archive_path, 'dataset', '**', '*.wav'),
            recursive=True,
        )
        for path in wav_files:
            match = _RE_FILE_NAME.search(path)
            if not match:
                # logger.warning: .warn() is a deprecated alias.
                logger.warning(
                    f"File {path} does not match the naming convention"
                )
                continue
            digit, speaker_id = [
                match.group(key) for key in ["digit", "speaker_id"]
            ]
            if speaker_id not in speaker_map:
                # Skip the sample; the original code warned and then
                # crashed with a KeyError on the lookup below.
                logger.warning(f"Speaker {speaker_id} not found")
                continue
            with open(path, 'rb') as wav_file:
                sample = {
                    # Cast to int so the value matches the int8 feature.
                    "digit": int(digit),
                    "speaker_id": speaker_id,
                    # glob already returns paths rooted at archive_path;
                    # joining archive_path again duplicated the prefix for
                    # relative paths (and was a no-op for absolute ones).
                    "file_name": path,
                    "audio": {"path": path, "bytes": wav_file.read()},
                }
            sample.update(self._map_speaker_info(speaker_map[speaker_id]))
            yield path, sample