sanchit-gandhi (HF staff) committed
Commit 8e74f40
1 parent: db6c031
Files changed (1):
  1. tedlium.py +94 -345
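To try this exact version of the loading script, the commit hash above can be passed as a `revision` to `datasets.load_dataset`. A minimal sketch, assuming the script is hosted in a Hub dataset repo (the repo id below is a placeholder, not stated on this page):

from datasets import load_dataset

# Placeholder repo id: substitute the dataset repo that hosts this tedlium.py.
ds = load_dataset(
    "LIUM/tedlium",
    "release3",
    revision="8e74f40",  # pin to this commit
    streaming=True,
)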
tedlium.py CHANGED
@@ -11,220 +11,43 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-"""TED-LIUM speech recognition dataset."""
 import csv
-import os
-import re
-from collections import defaultdict
-from io import BytesIO
-from pathlib import Path
-
-import numpy as np
-import soundfile as sf
 
 import datasets
 from datasets.tasks import AutomaticSpeechRecognition
 
-
-_DL_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/"
-
-_LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"
-
-_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/tedlium"
-_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"
-
-
-class TedliumReleaseConfig(datasets.BuilderConfig):
-    """BuilderConfig for a release of the TED-LIUM dataset."""
-
-    def __init__(self, *, url, download_urls, split_paths, citation, **kwargs):
-        super(TedliumReleaseConfig, self).__init__(version=datasets.Version("1.0.1"), **kwargs)
-        self.url = url
-        self.download_urls = download_urls
-        # List of split, path pairs containing the relative path within the
-        # extracted tarball to the data for each split.
-        self.split_paths = split_paths
-        self.citation = citation
 
 
-def _make_builder_configs():
-    """Creates builder configs for all supported Tedlium dataset releases."""
-    release1 = TedliumReleaseConfig(
-        name="release1",
-        description="""\
-        The TED-LIUM corpus is English-language TED talks, with transcriptions,
-        sampled at 16kHz. It contains about 118 hours of speech.
-
-        This is the TED-LIUM corpus release 1,
-        licensed under Creative Commons BY-NC-ND 3.0
-        (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
-        """,
-        citation="""\
-        @inproceedings{rousseau2012tedlium,
-          title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},
-          author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
-          booktitle={Conference on Language Resources and Evaluation (LREC)},
-          pages={125--129},
-          year={2012}
-        }
-        """,
-        url="https://www.openslr.org/7/",
-        download_urls={
-            "train": [_DL_URL + os.path.join("TEDLIUM_release1", "train.tar.gz")],
-            "validation": [_DL_URL + os.path.join("TEDLIUM_release1", "dev.tar.gz")],
-            "test": [_DL_URL + os.path.join("TEDLIUM_release1", "test.tar.gz")],
-        },
-        split_paths=[
-            (datasets.Split.TRAIN, "train"),
-            (datasets.Split.VALIDATION, "dev"),
-            (datasets.Split.TEST, "test"),
-        ],
-    )
-
-    release2 = TedliumReleaseConfig(
-        name="release2",
-        description="""\
-        This is the TED-LIUM corpus release 2,
-        licensed under Creative Commons BY-NC-ND 3.0
-        (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
-
-        All talks and text are property of TED Conferences LLC.
-
-        The TED-LIUM corpus was made from audio talks and their transcriptions
-        available on the TED website. We have prepared and filtered these data
-        in order to train acoustic models to participate to the International
-        Workshop on Spoken Language Translation 2011 (the LIUM English/French
-        SLT system reached the first rank in the SLT task).
-
-        Contains 1495 talks and transcripts.
-        """,
-        citation="""\
-        @inproceedings{rousseau2014tedlium2,
-          title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},
-          author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
-          booktitle={Conference on Language Resources and Evaluation (LREC)},
-          year={2014}
-        }
-        """,
-        url="https://www.openslr.org/19/",
-        download_urls={
-            "train": [
-                _DL_URL + os.path.join("TEDLIUM_release2", "train_1.tar.gz"),
-                _DL_URL + os.path.join("TEDLIUM_release2", "train_2.tar.gz"),
-            ],
-            "validation": [_DL_URL + os.path.join("TEDLIUM_release2", "dev.tar.gz")],
-            "test": [_DL_URL + os.path.join("TEDLIUM_release2", "test.tar.gz")],
-        },
-        split_paths=[
-            (datasets.Split.TRAIN, "train"),
-            (datasets.Split.VALIDATION, "dev"),
-            (datasets.Split.TEST, "test"),
-        ],
-    )
-
-    release3 = TedliumReleaseConfig(
-        name="release3",
-        description="""\
-        This is the TED-LIUM corpus release 3, licensed under Creative Commons
-        BY-NC-ND 3.0. This is the 'legacy' version of the corpus, in which the dev and test datasets are the same as in
-        TED-LIUM 2 (and TED-LIUM 1).
-
-        All talks and text are property of TED Conferences LLC.
-
-        This new TED-LIUM release was made through a collaboration between the
-        Ubiqus company and the LIUM (University of Le Mans, France)
-
-        Contents:
-
-        - 2351 audio talks in NIST sphere format (SPH), including talks from
-          TED-LIUM 2: be careful, same talks but not same audio files (only
-          these audio file must be used with the TED-LIUM 3 STM files)
-        - 452 hours of audio
-        - 2351 aligned automatic transcripts in STM format
-        - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
-          corresponding manual transcriptions.
-        - Dictionary with pronunciations (159848 entries), same file as the one
-          included in TED-LIUM 2
-        - Selected monolingual data for language modeling from WMT12 publicly
-          available corpora: these files come from the TED-LIUM 2 release, but
-          have been modified to get a tokenization more relevant for English
-          language
-
-        """,
-        citation="""\
-        @inproceedings{hernandez2018tedlium3,
-          title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
-          author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
-          booktitle={International Conference on Speech and Computer},
-          pages={198--208},
-          year={2018},
-          organization={Springer}
-        }
-        """,
-        url="https://www.openslr.org/51/",
-        download_urls={
-            "train": [
-                _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_1.tar.gz"),
-                _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_2.tar.gz"),
-            ],
-            "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "dev.tar.gz")],
-            "test": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "test.tar.gz")],
-        },
-        split_paths=[
-            (datasets.Split.TRAIN, "train"),
-            (datasets.Split.VALIDATION, "dev"),
-            (datasets.Split.TEST, "test"),
-        ],
-    )
-
-    release3_speaker_adaptation = TedliumReleaseConfig(
-        name="release3-speaker-adaptation",
-        description="""\
-        This is the TED-LIUM corpus release 3, licensed under Creative Commons
-        BY-NC-ND 3.0. This is the 'speaker adaptation' version of the corpus, specially designed for experiments on
-        speaker adaptation.
-
-        All talks and text are property of TED Conferences LLC.
-
-        This new TED-LIUM release was made through a collaboration between the
-        Ubiqus company and the LIUM (University of Le Mans, France)
-        """,
-        citation="""\
-        @inproceedings{hernandez2018tedlium3,
-          title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
-          author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
-          booktitle={International Conference on Speech and Computer},
-          pages={198--208},
-          year={2018},
-          organization={Springer}
-        }
-        """,
-        url="https://www.openslr.org/51/",
-        download_urls={
-            "train": [
-                _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train_1.tar.gz"),
-                _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train_2.tar.gz"),
-            ],
-            "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "dev.tar.gz")],
-            "test": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "test.tar.gz")],
-        },
-        split_paths=[
-            (datasets.Split.TRAIN, "train"),
-            (datasets.Split.VALIDATION, "dev"),
-            (datasets.Split.TEST, "test"),
-        ],
-    )
-
-    return [release1, release2, release3, release3_speaker_adaptation]
 
 
-class TedLium(datasets.GeneratorBasedBuilder):
     """The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech."""
 
     VERSION = datasets.Version("1.1.0")
 
-    BUILDER_CONFIGS = _make_builder_configs()
 
     def _info(self):
         features = datasets.Features(
@@ -239,170 +62,96 @@ class TedLium(datasets.GeneratorBasedBuilder):
             }
         )
         return datasets.DatasetInfo(
-            description=self.config.description,
             features=features,
             supervised_keys=("audio", "text"),
-            homepage=self.config.url,
             license=_LICENSE,
-            citation=self.config.citation,
             task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
         )
 
     def _split_generators(self, dl_manager):
-        if self.config.name != "release3":
-            raise ValueError("This dataset is only compatible with the `release3` config.")
-
-        archive_path = dl_manager.download(self.config.download_urls)
-        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
-        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
-
-        transcription_urls = {split: _WHISPER_TRANSCRIPT_URLs.format(split=split) for split in ["train", "validation", "test"]}
-        transcript_archive_path = dl_manager.download(transcription_urls)
-
-        splits = []
-        for split, path in self.config.split_paths:
-            kwargs = {
-                "filepath": [dl_manager.iter_archive(sharded_path) for sharded_path in archive_path[split]],
-                "local_extracted_archive": local_extracted_archive.get(split),
-                "split_path": path,
-                "whisper_transcript": transcript_archive_path[split if split != "dev" else "validation"]
-            }
-            splits.append(datasets.SplitGenerator(name=split, gen_kwargs=kwargs))
-        return splits
 
-    def _generate_examples(self, filepath, local_extracted_archive, split_path, whisper_transcript):
         whisper_transcriptions = dict()
         with open(whisper_transcript, encoding="utf-8") as f:
             reader = csv.DictReader(f, delimiter=",")
             for line in reader:
                 whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]
 
-        """Generate examples from a TED-LIUM stm file."""
-        if local_extracted_archive:
-            for local_archive in local_extracted_archive:
-                # The stm directory houses the speaker and transcription information in .stm format
-                split_dir = os.path.join(local_archive, split_path)
-                stm_files = [os.path.join(split_dir, f) for f in os.listdir(split_dir) if f.endswith(".stm")]
-                for file in stm_files:
-                    # the .sph speaker file almost always has the same file name as the .stm file
-                    speaker_file = Path(file).stem
-                    audio_file = os.path.join(split_dir, speaker_file + ".sph")
-                    segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
-                    with open(file) as f:
-                        for line in f:
-                            line = line.strip()
-                            fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
-                            transcript = _maybe_trim_suffix(transcript)
-                            if speaker_file != fn:
-                                # handle the case where the stm file does not have the same file name as the transcript
-                                speaker_file = fn
-                                audio_file = os.path.join(split_dir, speaker_file + ".sph")
-                                segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
-                            samples = _extract_audio_segment(segment, sampling_rate, float(start), float(end))
-                            key = "-".join([speaker, start, end, label])
-                            example = {
-                                "audio": {"path": audio_file, "array": samples, "sampling_rate": sampling_rate},
-                                "text": transcript,
-                                "speaker_id": speaker,
-                                "gender": _parse_gender(label),
-                                "file": audio_file,
-                                "id": key,
-                                "whisper_transcript": whisper_transcriptions.get(key, None)
-                            }
-                            yield key, example
-
-        else:
-            audio_data = {}
-            transcripts = defaultdict(list)
-            for file in filepath:
-                for path, f in file:
-                    if path.endswith(".sph"):
-                        # get the speaker id
-                        fn = path.split("/")[-1].strip(".sph")
-                        # read the audio data from raw byte form and add key-value pair to dict
-                        audio_data[fn] = sf.read(BytesIO(f.read()), dtype=np.int16)
-                    elif path.endswith(".stm"):
-                        for line in f:
-                            if line:
-                                line = line.decode("utf-8").strip()
-                                fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
-                                transcript = _maybe_trim_suffix(transcript)
-                                audio_file = path.replace("stm", "sph")
-                                key = "-".join([speaker, start, end, label])
-                                # append metadata information to the dict of transcripts for the associated speaker
-                                transcripts[fn].append(
-                                    {
-                                        "text": transcript,
-                                        "speaker_id": speaker,
-                                        "gender": _parse_gender(label),
-                                        "file": audio_file,
-                                        "id": key,
-                                        "start": start,
-                                        "end": end,
-                                        "channel": channel,
-                                        "fn": fn,
-                                    }
-                                )
-
-            if audio_data and audio_data.keys() == transcripts.keys():
-                for fn, speaker in transcripts.items():
-                    for transcript in speaker:
-                        segment, sampling_rate = audio_data[transcript["fn"]]
-                        samples = _extract_audio_segment(
-                            segment,
-                            sampling_rate,
-                            float(transcript["start"]),
-                            float(transcript["end"]),
-                        )
-                        audio = {"path": transcript["file"], "array": samples, "sampling_rate": sampling_rate}
-                        key = transcript["id"]
-                        transcript_text = transcript["text"]
-                        whisper_transcription = whisper_transcriptions.get(key, None) if transcript_text != "ignore_time_segment_in_scoring" else "ignore_time_segment_in_scoring"
-                        yield key, {
-                            "audio": audio,
-                            "text": transcript_text,
-                            "speaker_id": transcript["speaker_id"],
-                            "gender": transcript["gender"],
-                            "file": transcript["file"],
-                            "id": transcript["id"],
-                            "whisper_transcript": whisper_transcription
-                        }
-
-                audio_data = {}
-                transcripts = defaultdict(list)
-
-
-def _maybe_trim_suffix(transcript):
-    # stm files for the TEDLIUM release 1 train split contain a key (enclosed in
-    # parens) at the end.
-    splits = transcript.rsplit(" ", 1)
-    transcript = splits[0]
-    if len(splits) > 1:
-        suffix = splits[-1]
-        if not suffix.startswith("("):
-            transcript += " " + suffix
-    return transcript
-
-
-def _extract_audio_segment(segment, sampling_rate, start_sec, end_sec):
-    """Extracts segment of audio samples (as an ndarray) from the given segment."""
-    # The dataset only contains mono audio.
-    start_sample = int(start_sec * sampling_rate)
-    end_sample = min(int(end_sec * sampling_rate), segment.shape[0])
-    samples = segment[start_sample:end_sample]
-    return samples
-
-
-def _parse_gender(label_str):
-    """Parse gender string from STM "<label>" field."""
-    gender = re.split(",|_", label_str)[-1][:-1]
-    # Fix inconsistencies in the data.
-    if not gender:
-        gender = -1  # Missing label.
-    elif gender == "<NA":  # In TEDLIUM release 3 training data.
-        gender = -1  # Missing label.
-    elif gender == "F":
-        gender = "female"
-    elif gender == "M":
-        gender = "male"
-    return gender

 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""The TEDLIUM dataset for automatic speech recognition."""
 import csv
 
 import datasets
 from datasets.tasks import AutomaticSpeechRecognition
 
+from huggingface_hub import list_repo_files
+
+import pyarrow.parquet as pq
+import pyarrow as pa
+
+
+_DESCRIPTION = """\
+The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech.
+"""
+
+_HOMEPAGE = "https://www.openslr.org/51/"
+
+_LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"
+
+_DATA_REPO_ID = "sanchit-gandhi/tedlium-data"
+
+_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/tedlium"
+
+_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"
+
+
+class TedLium(datasets.ArrowBasedBuilder):
     """The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech."""
 
     VERSION = datasets.Version("1.1.0")
 
+    # This version of the loading script is hard-coded to work with the release3 config only.
+    DEFAULT_CONFIG_NAME = "release3"
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="release3", version=VERSION, description=_DESCRIPTION),
+    ]
 
     def _info(self):
         features = datasets.Features(
@@ -239,170 +62,96 @@ class TedLium(datasets.GeneratorBasedBuilder):
             }
         )
         return datasets.DatasetInfo(
+            description=_DESCRIPTION,
             features=features,
             supervised_keys=("audio", "text"),
+            homepage=_HOMEPAGE,
             license=_LICENSE,
             task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
         )
 
     def _split_generators(self, dl_manager):
+        data_repo_download = f"https://huggingface.co/datasets/{_DATA_REPO_ID}/resolve/main/"
+        all_files = list_repo_files(_DATA_REPO_ID, repo_type="dataset")
+
+        train_files = [file for file in all_files if file.startswith("data/train")]
+        validation_files = [file for file in all_files if file.startswith("data/validation")]
+        test_files = [file for file in all_files if file.startswith("data/test")]
+
+        split_to_ids = {
+            "train": train_files,
+            "validation": validation_files,
+            "test": test_files,
+        }
+
+        dl_urls = {}
+        for split, split_ids in split_to_ids.items():
+            dl_urls[split] = [data_repo_download + source_id for source_id in split_ids]
+        archive_paths = dl_manager.download(dl_urls)
+
+        local_extracted_archive_paths = (
+            dl_manager.extract(archive_paths)
+            if not dl_manager.is_streaming
+            else {split: [None] * len(archive_paths[split]) for split in split_to_ids}
+        )
+
+        transcription_urls = {split: _WHISPER_TRANSCRIPT_URLs.format(split=split.replace(".", "-")) for split in split_to_ids}
+        transcript_archive_path = dl_manager.download(transcription_urls)
+
+        train_split = [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "local_extracted_archive_paths": local_extracted_archive_paths["train"],
+                    "archives": [dl_manager.iter_files(path) for path in archive_paths["train"]],
+                    "whisper_transcript": transcript_archive_path["train"],
+                },
+            ),
+        ]
+        dev_split = [
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "local_extracted_archive_paths": local_extracted_archive_paths["validation"],
+                    "archives": [dl_manager.iter_files(path) for path in archive_paths["validation"]],
+                    "whisper_transcript": transcript_archive_path["validation"],
+                },
+            ),
+        ]
+        test_split = [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "local_extracted_archive_paths": local_extracted_archive_paths["test"],
+                    "archives": [dl_manager.iter_files(path) for path in archive_paths["test"]],
+                    "whisper_transcript": transcript_archive_path["test"],
+                },
+            ),
+        ]
+        return train_split + dev_split + test_split
+
+    def _generate_tables(self, local_extracted_archive_paths, archives, whisper_transcript):
         whisper_transcriptions = dict()
         with open(whisper_transcript, encoding="utf-8") as f:
             reader = csv.DictReader(f, delimiter=",")
             for line in reader:
                 whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]
 
+        idx = 0
+        for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
+            # Here we iterate over all the Parquet shards downloaded for this split:
+            for audio_file in archive:
+                with open(audio_file, "rb") as f:
+                    pf = pq.ParquetFile(f)
+                    for record_batch in pf.iter_batches():
+                        pa_table = pa.Table.from_batches([record_batch])
+
+                        batch_whisper_transcript = []
+                        for text, file_id in zip(pa_table["text"], pa_table["id"]):
+                            transcription = whisper_transcriptions.get(str(file_id), None)
+                            batch_whisper_transcript.append(transcription if str(text) != "ignore_time_segment_in_scoring" else "ignore_time_segment_in_scoring")
+
+                        batch_whisper_transcript = pa.array(batch_whisper_transcript, pa.string())
+                        pa_table = pa_table.append_column("whisper_transcript", batch_whisper_transcript)
+                        yield idx, pa_table
+                        idx += 1
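
The heart of the new `_generate_tables` is a batch-wise Parquet read-and-append pattern: each record batch is materialised as a `pyarrow.Table`, and a `whisper_transcript` string column is appended before the table is yielded. A minimal self-contained sketch of the same pattern (the file name and the pseudo-label lookup below are hypothetical stand-ins, not taken from this commit):

import pyarrow as pa
import pyarrow.parquet as pq

# Hypothetical pseudo-label lookup, keyed by segment id as in the loader above.
lookup = {"spk1-16.0-18.5-<o,f0,female>": "hello world"}

# Write a tiny Parquet shard so the sketch is self-contained.
pq.write_table(
    pa.table({"id": ["spk1-16.0-18.5-<o,f0,female>"], "text": ["hello world"]}),
    "example.parquet",
)

pf = pq.ParquetFile("example.parquet")
for record_batch in pf.iter_batches(batch_size=1024):
    table = pa.Table.from_batches([record_batch])
    # Build the new column for this batch, falling back to None when no label exists.
    column = pa.array(
        [lookup.get(str(file_id), None) for file_id in table["id"]],
        pa.string(),
    )
    table = table.append_column("whisper_transcript", column)
    print(table.column_names)  # ['id', 'text', 'whisper_transcript']

Because only one record batch is held in memory at a time, the loader can stream arbitrarily large shards without ever materialising a full table, which is what keeps the streaming mode of this builder cheap.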