BrunoHays committed on
Commit 27a55e3
1 Parent(s): e6ce93e

Create multilingual-TEDX-fr

Files changed (1): multilingual-TEDX-fr (+232 -0)
multilingual-TEDX-fr ADDED
@@ -0,0 +1,232 @@
+ import os
+ import re
+ from dataclasses import dataclass
+ from typing import List, Tuple
+ from pathlib import Path
+ import xml.etree.ElementTree as ET
+ import ffmpeg
+ import csv
+ import datasets
+ import numpy as np
+
+ _CITATION = """\
+ @inproceedings{salesky2021mtedx,
+   title={Multilingual TEDx Corpus for Speech Recognition and Translation},
+   author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. Oard and Matt Post},
+   booktitle={Proceedings of Interspeech},
+   year={2021},
+ }
+ """
+
+ _DESCRIPTION = """\
+ French subpart of the multilingual TEDx dataset
+ """
+ SAMPLING_RATE = 16_000
+
+ @dataclass
+ class Utterance:
+     speaker_id: str
+     index: int
+     sentence: str
+     start_timestamp: float
+     end_timestamp: float
+
+
+ class TEDXConfig(datasets.BuilderConfig):
+     """BuilderConfig for TEDX."""
+
+     def __init__(self, name, **kwargs):
+         """
+         Args:
+           name: `string`, name of dataset config (=language)
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(TEDXConfig, self).__init__(
+             version=datasets.Version("2.14.5", ""), name=name, **kwargs
+         )
+         self.single_samples = (name == "single_samples")
+         self.max = (name == "max")
+         if not self.single_samples and not self.max:
+             # Config names such as "max=30s" encode the maximum merged duration in seconds.
+             self.max_duration = float(name.split("=")[1][:-1])
+         else:
+             self.max_duration = np.inf
+
+
+ class TEDX(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         TEDXConfig(name="single_samples", description="all samples taken separately, can be very short and imprecise"),
+         TEDXConfig(name="max", description="all samples of a talk are merged together"),
+         TEDXConfig(name="max=30s", description="samples are merged in order to reach a max duration of 30 seconds. "
+                                                "Does not remove single utterances that may exceed "
+                                                "the maximum duration"),
+
+         TEDXConfig(name="max=10s", description="samples are merged in order to reach a max duration of 10 seconds. "
+                                                "Does not remove single utterances that may exceed "
+                                                "the maximum duration"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "single_samples"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "audio": datasets.features.Audio(sampling_rate=SAMPLING_RATE),
+                     "sentence": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "start_timestamp": datasets.Value("float"),
+                     "end_timestamp": datasets.Value("float"),
+                     "index": datasets.Value("int32"),
+                 }
+             ),
+             citation=_CITATION,
+         )
+
+     def _split_by_audio_file(self, segments_path: str, sentences_path: str, split_name: str) -> Tuple[List[str], List[List[Utterance]]]:
+         speaker_paths = []
+         seen_ids = set()
+         utterances_per_speaker = []
+         with open(segments_path, "r") as segments_file, open(sentences_path) as sentences_file:
+             segments_reader = csv.DictReader(segments_file, delimiter=' ', fieldnames=["segment_id", "speaker_id", "start_timestamp", "end_timestamp"])
+             sentences_list = sentences_file.readlines()
+             for segment, sentence in zip(segments_reader, sentences_list):
+                 if segment["speaker_id"] not in seen_ids:
+                     # First utterance of a new talk: register its audio file and start a new utterance list.
+                     seen_ids.add(segment["speaker_id"])
+                     speaker_paths.append(Path("data") / Path(split_name) / Path("wav") / Path(f"{segment['speaker_id']}.flac"))
+                     utterances_per_speaker.append([])
+                 utterances_per_speaker[-1].append(Utterance(speaker_id=segment["speaker_id"],
+                                                             index=int(segment["segment_id"].split("_")[1]),
+                                                             sentence=sentence,
+                                                             start_timestamp=float(segment["start_timestamp"]),
+                                                             end_timestamp=float(segment["end_timestamp"])
+                                                             ))
+         return speaker_paths, utterances_per_speaker
+
+
+
+     def _split_generators(self, dl_manager):
+         segments = {
+             "train": dl_manager.download("data/train/txt/segments"),
+             "test": dl_manager.download("data/test/txt/segments"),
+             "dev": dl_manager.download("data/dev/txt/segments")
+         }
+         sentences = {
+             "train": dl_manager.download("data/train/txt/train.fr"),
+             "test": dl_manager.download("data/test/txt/test.fr"),
+             "dev": dl_manager.download("data/dev/txt/dev.fr"),
+         }
+
+         splitted_dataset = {}
+         for split in segments:
+             audios_path, utterances = self._split_by_audio_file(segments[split], sentences[split], split)
+             audios_path = dl_manager.download(audios_path)
+             splitted_dataset[split] = {
+                 "audios_path": audios_path,
+                 "utterances": utterances
+             }
+
+         splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs=splitted_dataset["train"]
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs=splitted_dataset["test"]
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs=splitted_dataset["dev"]
+             ),
+         ]
+
+         return splits
+
+     @staticmethod
+     def merge_utterances(utterance1: Utterance, utterance2: Utterance) -> Utterance:
+         assert utterance1.speaker_id == utterance2.speaker_id
+         assert utterance2.index > utterance1.index
+         return Utterance(
+             speaker_id=utterance1.speaker_id,
+             sentence=re.sub(r"\s+", " ", utterance1.sentence + " " + utterance2.sentence),
+             start_timestamp=utterance1.start_timestamp,
+             end_timestamp=utterance2.end_timestamp,
+             index=utterance1.index
+         )
+
+
+
+     def _merged_utterances_iterator(self, utterances: List[Utterance]):
+         utterances = iter(utterances)
+         if self.config.single_samples:
+             # No merging requested: yield every utterance as-is and stop.
+             yield from utterances
+             return
+         merged_utterance = next(utterances)
+         start_time = merged_utterance.start_timestamp
+         while True:
+             try:
+                 new_utterance = next(utterances)
+             except StopIteration:
+                 yield merged_utterance
+                 break
+             end_time = new_utterance.end_timestamp
+             if end_time - start_time > self.config.max_duration:
+                 yield merged_utterance
+                 merged_utterance = new_utterance
+                 start_time = merged_utterance.start_timestamp
+             else:
+                 merged_utterance = TEDX.merge_utterances(merged_utterance, new_utterance)
+
+
+
+     @staticmethod
+     def load_audio(file: str, sr: int = SAMPLING_RATE):
+         """
+         Open an audio file and read it as a mono waveform, resampling as necessary.
+         Parameters
+         ----------
+         file: str
+             The audio file to read
+         sr: int
+             The sample rate to resample the audio to, if necessary
+         Returns
+         -------
+         A NumPy array containing the audio waveform, in float32 dtype.
+         """
+         try:
+             # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
+             # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
+             out, _ = (
+                 ffmpeg.input(file)
+                 .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar=sr)
+                 .run(capture_stdout=True, capture_stderr=True)
+             )
+         except ffmpeg.Error as e:
+             raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
+         return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
+
+     @staticmethod
+     def _cut_audio(audio: np.ndarray, start_timestamp: float, end_timestamp: float):
+         return audio[int(round(start_timestamp * SAMPLING_RATE)): int(round(end_timestamp * SAMPLING_RATE)) + 1]
+
+     def _generate_examples(self, audios_path: List[str], utterances: List[List[Utterance]]):
+         """Generate examples from a Multilingual TEDx data dir."""
+         for audio_path, speaker_utterances in zip(audios_path, utterances):
+             audio = self.load_audio(audio_path)
+             for utterance in self._merged_utterances_iterator(speaker_utterances):
+                 transcript_name = f"{utterance.speaker_id}-{utterance.index}"
+                 yield transcript_name, {
+                     "file": transcript_name,
+                     "index": utterance.index,
+                     "sentence": utterance.sentence,
+                     "start_timestamp": utterance.start_timestamp,
+                     "end_timestamp": utterance.end_timestamp,
+                     "speaker_id": utterance.speaker_id,
+                     "audio": {"path": transcript_name,
+                               "array": self._cut_audio(audio, utterance.start_timestamp, utterance.end_timestamp),
+                               "sampling_rate": SAMPLING_RATE}}