File size: 6,826 Bytes
3a2fe2d
 
 
 
24f61ed
 
3a2fe2d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ce1343e
3a2fe2d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5a1e795
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a2fe2d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51fdbae
0199a19
51fdbae
 
 
 
 
 
 
0199a19
 
 
 
 
 
 
 
 
3a2fe2d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2910050
 
 
 
3a2fe2d
2910050
3a2fe2d
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
import csv
import os
import json

import datasets




# BibTeX citation for the dataset; not yet published.
_CITATION = """ TBD """

# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
ALORESB is a collection of african speech corpus for  ASR Task.
"""

# Relative root of all data files for one configuration;
# {name} is the config (language) name, e.g. "audio/hausa".
_DL_URL_FORMAT = "audio/{name}"

class AloresbConfig(datasets.BuilderConfig):
    """BuilderConfig for one ALORESB language configuration.

    Besides the standard ``BuilderConfig`` fields, this stores
    ``data_root_url`` — the relative root ("audio/<name>") from which all
    data files of the configuration are downloaded.
    """

    def __init__(self, name, **kwargs):
        """
        Args:
            name: name of the configuration (language identifier).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), name=name, **kwargs)
        self.data_root_url = _DL_URL_FORMAT.format(name=name)



class Aloresb(datasets.GeneratorBasedBuilder):
    """ALORESB: a collection of African low-resource speech corpora for ASR.

    Each configuration is one language. "hausa" and "wolof" ship with
    train/dev/test splits; every other language only has train/test.
    """

    BUILDER_CONFIGS = [
        AloresbConfig(name="fongbe", description="Fongbe aloresb dataset"),
        AloresbConfig(name="hausa", description="Hausa aloresb dataset"),
        # NOTE(review): "ahmaric" looks like a typo for "amharic", but the
        # config name is public interface (users load by name), so it is
        # kept unchanged here.
        AloresbConfig(name="ahmaric", description="Ahmaric aloresb dataset"),
        AloresbConfig(name="wolof", description="Wolof aloresb dataset"),
        AloresbConfig(name="swahili", description="Swahili aloresb dataset"),
    ]

    # Languages that provide a dev (validation) split in addition to train/test.
    _LANGS_WITH_DEV = ("hausa", "wolof")

    def _info(self):
        """Return dataset metadata: features, description and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "audio_id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            # Fix: _CITATION was defined at module level but never passed.
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Download per-split transcripts and audio, return SplitGenerators.

        The previous implementation duplicated the whole download/generator
        logic across the "has dev split" and "no dev split" branches and
        left a debug ``print`` behind; it is now data-driven over the list
        of splits, with identical results.
        """
        splits = (
            ["train", "dev", "test"]
            if self.config.name in self._LANGS_WITH_DEV
            else ["train", "test"]
        )

        transcripts = dl_manager.download(
            {split: f"{self.config.data_root_url}/{split}/transcripts.txt" for split in splits}
        )
        audio_filenames_paths = dl_manager.download(
            {split: f"{self.config.data_root_url}/{split}/audio_filenames.txt" for split in splits}
        )

        # Each split lists its audio files in audio_filenames.txt (one name
        # per line); download every listed file individually.
        audio_archives = {}
        for split, filenames_path in audio_filenames_paths.items():
            if os.path.exists(filenames_path):
                with open(filenames_path, encoding="utf-8") as f:
                    audio_filenames = [line.strip() for line in f]
                audio_archives[split] = dl_manager.download(
                    [
                        f"{self.config.data_root_url}/{split}/audio/{filename}"
                        for filename in audio_filenames
                    ]
                )

        # In non-streaming mode, extract locally so "file" can point to an
        # actual local audio path; in streaming mode there is nothing local.
        local_extracted_archives = (
            dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {}
        )

        split_name = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_name[split],
                gen_kwargs={
                    "transcript_path": transcripts[split],
                    "audio_archives": [
                        dl_manager.iter_archive(archive) for archive in audio_archives[split]
                    ],
                    "local_extracted_archive": local_extracted_archives.get(split),
                },
            )
            for split in splits
        ]

    def _generate_examples(self, transcript_path, audio_archives, local_extracted_archive):
        """Yield (key, example) pairs joining audio files to transcripts.

        Args:
            transcript_path: path to a TSV file of ``audio_id<TAB>text`` lines.
            audio_archives: iterables of (filename, file-object) pairs.
            local_extracted_archive: per-archive local paths (non-streaming)
                or None (streaming).
        """
        transcripts = {}
        with open(transcript_path, encoding="utf-8") as f:
            for line in f:
                # maxsplit=1: the transcript text itself may contain tabs.
                audio_id, text = line.strip().split("\t", 1)
                transcripts[audio_id] = text

        for archive_idx, audio_archive in enumerate(audio_archives):
            for audio_filename, file in audio_archive:
                # Fix: the old `audio_filename.split(ext)[0]` truncated at
                # the first occurrence of the extension substring anywhere
                # in the name and raised ValueError for extension-less
                # names; splitext drops only the final extension.
                audio_id = os.path.splitext(audio_filename)[0]
                audio_transcript = transcripts[audio_id]

                local_audio_file_path = (
                    os.path.join(local_extracted_archive[archive_idx], audio_filename)
                    if local_extracted_archive
                    else None
                )
                yield audio_filename, {
                    "file": local_audio_file_path,
                    "text": audio_transcript,
                    "audio_id": audio_id,
                }