Datasets:

Languages:
English
License:
File size: 19,141 Bytes
575441a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
693cf23
 
 
575441a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
693cf23
575441a
 
 
 
 
 
 
 
 
 
 
 
 
693cf23
 
 
575441a
 
 
693cf23
 
 
 
575441a
 
 
 
 
 
693cf23
575441a
 
 
 
693cf23
 
 
 
 
 
 
 
575441a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
693cf23
575441a
 
693cf23
575441a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
693cf23
 
575441a
 
693cf23
575441a
 
 
 
693cf23
575441a
693cf23
 
575441a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
# Copyright 2022 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""TED-LIUM speech recognition dataset."""

import os
import re
from collections import defaultdict
from io import BytesIO
from pathlib import Path

import numpy as np
import soundfile as sf

import datasets
from datasets.tasks import AutomaticSpeechRecognition


_DL_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/"

_LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"

_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/tedlium/resolve/main/transcription_data/greedy_search/"
_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.txt"


class TedliumReleaseConfig(datasets.BuilderConfig):
    """BuilderConfig describing one release of the TED-LIUM dataset.

    Args:
        url: Homepage of the release.
        download_urls: Mapping of split name to the list of archive URLs for that split.
        split_paths: ``(split, relative_path)`` pairs locating each split's data
            inside the extracted tarball.
        citation: BibTeX citation for the release.
    """

    def __init__(self, *, url, download_urls, split_paths, citation, **kwargs):
        # Every release shares the same builder-config version.
        super().__init__(version=datasets.Version("1.0.1"), **kwargs)
        self.url = url
        self.download_urls = download_urls
        self.split_paths = split_paths
        self.citation = citation


def _make_builder_configs():
    """Creates builder configs for all supported Tedlium dataset releases."""
    # Release 1 (openslr.org/7): single train tarball plus dev/test.
    release1 = TedliumReleaseConfig(
        name="release1",
        description="""\
        The TED-LIUM corpus is English-language TED talks, with transcriptions,
        sampled at 16kHz. It contains about 118 hours of speech.

        This is the TED-LIUM corpus release 1,
        licensed under Creative Commons BY-NC-ND 3.0
        (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
        """,
        citation="""\
        @inproceedings{rousseau2012tedlium,
          title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},
          author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
          booktitle={Conference on Language Resources and Evaluation (LREC)},
          pages={125--129},
          year={2012}
        }
        """,
        url="https://www.openslr.org/7/",
        download_urls={
            "train": [_DL_URL + os.path.join("TEDLIUM_release1", "train.tar.gz")],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release1", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release1", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    # Release 2 (openslr.org/19): the train split is sharded into two tarballs.
    release2 = TedliumReleaseConfig(
        name="release2",
        description="""\
        This is the TED-LIUM corpus release 2,
        licensed under Creative Commons BY-NC-ND 3.0
        (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).

        All talks and text are property of TED Conferences LLC.

        The TED-LIUM corpus was made from audio talks and their transcriptions
        available on the TED website. We have prepared and filtered these data
        in order to train acoustic models to participate to the International
        Workshop on Spoken Language Translation 2011 (the LIUM English/French
        SLT system reached the first rank in the SLT task).

        Contains 1495 talks and transcripts.
        """,
        citation="""\
        @inproceedings{rousseau2014tedlium2,
          title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},
          author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
          booktitle={Conference on Language Resources and Evaluation (LREC)},
          year={2014}
        }
        """,
        url="https://www.openslr.org/19/",
        download_urls={
            "train": [
                _DL_URL + os.path.join("TEDLIUM_release2", "train_1.tar.gz"),
                _DL_URL + os.path.join("TEDLIUM_release2", "train_2.tar.gz"),
            ],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release2", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release2", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    # Release 3 "legacy" (openslr.org/51): dev/test identical to releases 1 & 2.
    release3 = TedliumReleaseConfig(
        name="release3",
        description="""\
        This is the TED-LIUM corpus release 3, licensed under Creative Commons
        BY-NC-ND 3.0. This is the 'legacy' version of the corpus, in which the dev and test datasets are the same as in
        TED-LIUM 2 (and TED-LIUM 1).

        All talks and text are property of TED Conferences LLC.

        This new TED-LIUM release was made through a collaboration between the
        Ubiqus company and the LIUM (University of Le Mans, France)

        Contents:

        - 2351 audio talks in NIST sphere format (SPH), including talks from
          TED-LIUM 2: be careful, same talks but not same audio files (only
          these audio file must be used with the TED-LIUM 3 STM files)
        - 452 hours of audio
        - 2351 aligned automatic transcripts in STM format
        - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
          corresponding manual transcriptions.
        - Dictionary with pronunciations (159848 entries), same file as the one
          included in TED-LIUM 2
        - Selected monolingual data for language modeling from WMT12 publicly
          available corpora: these files come from the TED-LIUM 2 release, but
          have been modified to get a tokenization more relevant for English
          language

        """,
        citation="""\
        @inproceedings{hernandez2018tedlium3,
          title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
          author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
          booktitle={International Conference on Speech and Computer},
          pages={198--208},
          year={2018},
          organization={Springer}
        }
        """,
        url="https://www.openslr.org/51/",
        download_urls={
            "train": [
                _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_1.tar.gz"),
                _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_2.tar.gz"),
            ],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    # Release 3 "speaker-adaptation" variant: same corpus, re-partitioned for
    # speaker-adaptation experiments.
    release3_speaker_adaptation = TedliumReleaseConfig(
        name="release3-speaker-adaptation",
        description="""\
            This is the TED-LIUM corpus release 3, licensed under Creative Commons
            BY-NC-ND 3.0. This is the 'speaker adaptation' version of the corpus, specially designed for experiments on
            speaker adaptation.

            All talks and text are property of TED Conferences LLC.

            This new TED-LIUM release was made through a collaboration between the
            Ubiqus company and the LIUM (University of Le Mans, France)
            """,
        citation="""\
            @inproceedings{hernandez2018tedlium3,
              title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
              author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
              booktitle={International Conference on Speech and Computer},
              pages={198--208},
              year={2018},
              organization={Springer}
            }
            """,
        url="https://www.openslr.org/51/",
        download_urls={
            "train": [
                _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train_1.tar.gz"),
                _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train_2.tar.gz"),
            ],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    return [release1, release2, release3, release3_speaker_adaptation]


class TedLium(datasets.GeneratorBasedBuilder):
    """The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz.

    It contains about 118 hours of speech. Each generated example additionally carries a
    Whisper pseudo-label (``whisper_transcript``) downloaded as one text file per split;
    the pseudo-label file is consumed line-by-line in example order.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = _make_builder_configs()

    def _info(self):
        """Return dataset metadata: features, homepage, license, citation and the ASR task template."""
        features = datasets.Features(
            {
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "text": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "gender": datasets.features.ClassLabel(names=["unknown", "female", "male"]),
                "file": datasets.Value("string"),
                "id": datasets.Value("string"),
                "whisper_transcript": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=self.config.description,
            features=features,
            supervised_keys=("audio", "text"),
            homepage=self.config.url,
            license=_LICENSE,
            citation=self.config.citation,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        """Download the audio archives and Whisper pseudo-labels, and build one generator per split."""
        # The Whisper pseudo-labels are only provided for the `release3` config.
        if self.config.name != "release3":
            raise ValueError("This dataset is only compatible with the `release3` config.")

        archive_path = dl_manager.download(self.config.download_urls)
        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        # One pseudo-label text file per split; its lines are aligned with example order.
        transcription_urls = {split: _WHISPER_TRANSCRIPT_URLs.format(split=split) for split in ["train", "validation", "test"]}
        transcript_archive_path = dl_manager.download(transcription_urls)

        splits = []
        for split, path in self.config.split_paths:
            kwargs = {
                "filepath": [dl_manager.iter_archive(sharded_path) for sharded_path in archive_path[split]],
                "local_extracted_archive": local_extracted_archive.get(split),
                "split_path": path,
                # split_paths use "dev" while the transcript dict is keyed by "validation".
                "whisper_transcript": transcript_archive_path[split if split != "dev" else "validation"]
            }
            splits.append(datasets.SplitGenerator(name=split, gen_kwargs=kwargs))
        return splits

    def _generate_examples(self, filepath, local_extracted_archive, split_path, whisper_transcript):
        """Generate examples from TED-LIUM .stm/.sph files, pairing each segment with its pseudo-label.

        `idx` advances once per yielded example, so the pseudo-label file must list one line
        per segment in the same order the segments are generated.
        """
        whisper_transcripts = []

        with open(whisper_transcript, encoding="utf-8") as f:
            for row in f:
                whisper_transcripts.append(row.rstrip("\n"))
        idx = 0

        if local_extracted_archive:
            # Non-streaming mode: read .stm metadata and .sph audio straight from disk.
            for local_archive in local_extracted_archive:
                # The stm directory houses the speaker and transcription information in .stm format
                split_dir = os.path.join(local_archive, split_path)
                stm_files = [os.path.join(split_dir, f) for f in os.listdir(split_dir) if f.endswith(".stm")]
                for file in stm_files:
                    # the .sph speaker file almost always has the same file name as the .stm file
                    speaker_file = Path(file).stem
                    audio_file = os.path.join(split_dir, speaker_file + ".sph")
                    segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
                    with open(file) as f:
                        for line in f:
                            line = line.strip()
                            fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
                            transcript = _maybe_trim_suffix(transcript)
                            if speaker_file != fn:
                                # handle the case where the stm file does not have the same file name as the transcript
                                speaker_file = fn
                                audio_file = os.path.join(split_dir, speaker_file + ".sph")
                                segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
                            samples = _extract_audio_segment(segment, sampling_rate, float(start), float(end))
                            key = "-".join([speaker, start, end, label])
                            # Keep parity with the streaming branch below: segments excluded from
                            # scoring carry the sentinel text instead of a pseudo-label.
                            whisper_transcription = (
                                whisper_transcripts[idx]
                                if transcript != "ignore_time_segment_in_scoring"
                                else "ignore_time_segment_in_scoring"
                            )
                            example = {
                                "audio": {"path": audio_file, "array": samples, "sampling_rate": sampling_rate},
                                "text": transcript,
                                "speaker_id": speaker,
                                "gender": _parse_gender(label),
                                "file": audio_file,
                                "id": key,
                                "whisper_transcript": whisper_transcription,
                            }
                            yield key, example
                            idx += 1

        else:
            # Streaming mode: iterate the tar members, buffering per-talk audio and transcript
            # metadata until every buffered talk has both, then flush.
            audio_data = {}
            transcripts = defaultdict(list)
            for file in filepath:
                for path, f in file:
                    if path.endswith(".sph"):
                        # Get the talk id by slicing off the ".sph" suffix. Note that
                        # `str.strip(".sph")` would strip the *character set* {".", "s", "p", "h"}
                        # from both ends and mangle ids that begin/end with those characters,
                        # which would break the key match against `transcripts` below.
                        fn = path.split("/")[-1][: -len(".sph")]
                        # read the audio data from raw byte form and add key-value pair to dict
                        audio_data[fn] = sf.read(BytesIO(f.read()), dtype=np.int16)
                    elif path.endswith(".stm"):
                        for line in f:
                            if line:
                                line = line.decode("utf-8").strip()
                                fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
                                transcript = _maybe_trim_suffix(transcript)
                                audio_file = path.replace("stm", "sph")
                                key = "-".join([speaker, start, end, label])
                                # append metadata information to the dict of transcripts for the associated speaker
                                transcripts[fn].append(
                                    {
                                        "text": transcript,
                                        "speaker_id": speaker,
                                        "gender": _parse_gender(label),
                                        "file": audio_file,
                                        "id": key,
                                        "start": start,
                                        "end": end,
                                        "channel": channel,
                                        "fn": fn,
                                    }
                                )

                    # Flush once every buffered talk has both its audio and its transcripts.
                    if audio_data and audio_data.keys() == transcripts.keys():
                        for fn, speaker in transcripts.items():
                            for transcript in speaker:
                                segment, sampling_rate = audio_data[transcript["fn"]]
                                samples = _extract_audio_segment(
                                    segment,
                                    sampling_rate,
                                    float(transcript["start"]),
                                    float(transcript["end"]),
                                )
                                audio = {"path": transcript["file"], "array": samples, "sampling_rate": sampling_rate}
                                key = transcript["id"]
                                transcript_text = transcript["text"]
                                whisper_transcription = whisper_transcripts[idx] if transcript_text != "ignore_time_segment_in_scoring" else "ignore_time_segment_in_scoring"
                                yield key, {
                                    "audio": audio,
                                    "text": transcript_text,
                                    "speaker_id": transcript["speaker_id"],
                                    "gender": transcript["gender"],
                                    "file": transcript["file"],
                                    "id": transcript["id"],
                                    "whisper_transcript": whisper_transcription
                                }
                                idx += 1

                        audio_data = {}
                        transcripts = defaultdict(list)


def _maybe_trim_suffix(transcript):
    # stm files for the TEDLIUM release 1 train split contain a key (enclosed in
    # parens) at the end.
    splits = transcript.rsplit(" ", 1)
    transcript = splits[0]
    if len(splits) > 1:
        suffix = splits[-1]
        if not suffix.startswith("("):
            transcript += " " + suffix
    return transcript


def _extract_audio_segment(segment, sampling_rate, start_sec, end_sec):
    """Extracts segment of audio samples (as an ndarray) from the given segment."""
    # The dataset only contains mono audio.
    start_sample = int(start_sec * sampling_rate)
    end_sample = min(int(end_sec * sampling_rate), segment.shape[0])
    samples = segment[start_sample:end_sample]
    return samples


def _parse_gender(label_str):
    """Parse gender string from STM "<label>" field."""
    gender = re.split(",|_", label_str)[-1][:-1]
    # Fix inconsistencies in the data.
    if not gender:
        gender = -1  # Missing label.
    elif gender == "<NA":  # In TEDLIUM release 3 training data.
        gender = -1  # Missing label.
    elif gender == "F":
        gender = "female"
    elif gender == "M":
        gender = "male"
    return gender