|
from io import BytesIO |
|
import multiprocessing as mp |
|
from dataclasses import dataclass |
|
import os |
|
from pathlib import Path |
|
import queue |
|
|
|
import pydub |
|
|
|
import soundfile as sf |
|
from tqdm import tqdm |
|
|
|
from metadata import MetadataItem, LockedMetadata |
|
from vad import remove_silence, get_vad_model_and_utils |
|
|
|
|
|
@dataclass
class ProcessedFile:
    """Result of successfully processing a single corpus audio file."""

    output: Path  # path of the exported WAV inside the dataset directory
    transcription: str  # text read (and stripped) from the paired .txt file
    speaker_id: str  # parent directory name of the input audio file
    mic_id: str  # last '_'-separated token of the input file stem (e.g. 'mic1')
|
|
|
|
|
@dataclass
class FileToProcess:
    """Work item queued to worker processes: one audio file and its transcript."""

    input: Path  # source audio file (.flac under the corpus tree)
    input_txt: Path  # paired transcription text file
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def pad_silence(
    input: Path | BytesIO,
    pad_length: int,
    format: str = 'wav',
) -> Path | BytesIO:
    """Pad an audio file with silence on both ends, overwriting it in place.

    Args:
        input: Audio source — a filesystem path or an in-memory buffer.
            It is overwritten with the padded audio.
        pad_length: Silence duration prepended and appended, in milliseconds.
        format: Container format understood by pydub/ffmpeg.

    Returns:
        The same ``input`` object, now holding the padded audio.
    """
    audio = pydub.AudioSegment.from_file(input, format=format)

    pad = pydub.AudioSegment.silent(duration=pad_length)
    padded: pydub.AudioSegment = pad + audio + pad

    # Reading a BytesIO leaves its cursor at EOF, and pydub's export() does
    # not rewind or truncate file objects — exporting directly would append
    # the padded audio after the original bytes. Rewind and truncate first.
    # (A Path is reopened by pydub in write mode, which truncates anyway.)
    if isinstance(input, BytesIO):
        input.seek(0)
        input.truncate()

    padded.export(input, format=format)

    return input
|
|
|
|
|
def process_worker(
    work: mp.Queue,
    output: mp.Queue,
) -> None:
    """Worker-process entry point: drain the work queue into the output queue.

    Loads the VAD model once, then repeatedly pulls a ``FileToProcess`` from
    ``work``, processes it, and pushes the resulting ``ProcessedFile`` (or
    ``None`` on failure) onto ``output``. Exits once the work queue stays
    empty for the poll timeout.
    """
    vad_models_and_utils = get_vad_model_and_utils(use_cuda=False, use_onnx=False)

    # Loop on get() with a timeout instead of polling qsize(): qsize() is
    # racy across processes and raises NotImplementedError on macOS. The
    # queue.Empty handler below is the sole, reliable exit condition.
    while True:
        try:
            nitem = work.get(timeout=1)
        except queue.Empty:
            break

        result = process_file(
            vad_models_and_utils=vad_models_and_utils,
            inp=nitem.input,
            inp_txt=nitem.input_txt,
            output_directory=Path('dataset'),
            pad_length=25,
        )

        output.put(result)

    print(f"Worker {mp.current_process().name} finished processing.")
|
|
|
|
|
def process_file(
    vad_models_and_utils: tuple,
    inp: Path,
    inp_txt: Path,
    output_directory: Path,
    pad_length: int = 25,
) -> ProcessedFile | None:
    """Convert one corpus audio file into a silence-trimmed, padded dataset WAV.

    Pipeline: transcode the input to WAV in memory, trim leading/trailing
    silence with the VAD model, pad both ends with ``pad_length`` ms of
    silence, and write the result to ``output_directory``.

    Args:
        vad_models_and_utils: Tuple returned by ``get_vad_model_and_utils``.
        inp: Source audio file.
        inp_txt: Paired transcription text file.
        output_directory: Destination directory for the exported WAV.
        pad_length: Silence padding in milliseconds per end.

    Returns:
        Metadata for the written file, or ``None`` when either the audio or
        the transcription file is missing.
    """
    output_fpath = output_directory / f"{inp.stem}.wav"

    # Missing audio or transcript: skip silently; the caller treats None
    # as "nothing produced".
    if not inp.exists():
        return None

    if not inp_txt.exists():
        return None

    transcription = (
        inp_txt
        .read_text()
        .strip()
    )

    # Corpus layout assumption: <wav_root>/<speaker>/<utt>_<mic>.flac — the
    # parent directory names the speaker, the stem's last token the mic.
    # TODO(review): confirm this holds for every input the caller globs.
    speaker_id = inp.parent.name
    mic_id = inp.stem.split('_')[-1]

    audio_mem = BytesIO()

    # Normalize to WAV in memory so the VAD step always receives WAV input.
    audio: pydub.AudioSegment = pydub.AudioSegment.from_file(inp)
    audio.export(audio_mem, format='wav')
    audio_mem.seek(0)

    silent_audio_mem = BytesIO()

    # Trim silence only at the beginning and end; interior pauses are kept.
    _, _ = remove_silence(
        vad_models_and_utils,
        audio_path=audio_mem,
        out_path=silent_audio_mem,
        trim_just_beginning_and_end=True,
        format='wav',
    )

    # Rewind so pad_silence reads the trimmed audio from the start.
    silent_audio_mem.seek(0)

    output_audio = pad_silence(silent_audio_mem, pad_length)
    assert isinstance(output_audio, BytesIO), "Output audio should be a BytesIO object"

    # getbuffer() dumps the full buffer contents regardless of cursor position.
    with open(output_fpath, 'wb') as f:
        f.write(output_audio.getbuffer())

    return ProcessedFile(
        output=output_fpath,
        transcription=transcription,
        speaker_id=speaker_id,
        mic_id=mic_id,
    )
|
|
|
|
|
def main() -> None:
    """Build the dataset: trim, pad, and export all corpus audio plus metadata.

    Scans ``wav48_silence_trimmed`` for .flac files not yet present in the
    metadata, fans the work out to one worker process per CPU, collects the
    results, and writes/updates ``dataset/metadata.csv``.
    """
    txt = Path('txt')
    wav = Path('wav48_silence_trimmed')
    output_directory = Path('dataset')
    metadata_fpath = output_directory / 'metadata.csv'
    num_workers = os.cpu_count() or 1

    # 'spawn' gives workers a clean interpreter instead of inheriting state
    # (e.g. the VAD model) via fork.
    mp.set_start_method("spawn", force=True)

    print(f"Using {num_workers} workers for processing")

    if not txt.exists() or not wav.exists():
        raise ValueError("Input directories do not exist")

    if not output_directory.exists():
        output_directory.mkdir(parents=True, exist_ok=True)

    # Resume support: load existing metadata so finished files are skipped.
    metadata = LockedMetadata(key_field='id')

    if metadata_fpath.exists():
        metadata = LockedMetadata.load(metadata_fpath, key_field='id')

    files_to_process: list[FileToProcess] = []
    files = list(wav.glob('**/*.flac'))

    for file in files:
        stem = file.stem

        if stem in metadata:
            continue

        text = stem

        # Transcripts are shared per utterance; drop the microphone suffix
        # to locate the .txt file.
        if stem.endswith('_mic1') or stem.endswith('_mic2'):
            text = stem[:-5]

        directory = file.parent.name
        input_txt = txt / directory / f"{text}.txt"

        files_to_process.append(
            FileToProcess(
                input=file,
                input_txt=input_txt,
            )
        )

    # Annotations quoted: mp.Queue is a factory method, not a generic class,
    # so subscripting it eagerly is invalid outside type-checking.
    work_queue: "mp.Queue[FileToProcess]" = mp.Queue()
    output_queue: "mp.Queue[ProcessedFile | None]" = mp.Queue()

    for file in files_to_process:
        work_queue.put(file)

    # Warm the model cache in the parent before spawning workers.
    get_vad_model_and_utils(use_cuda=False, use_onnx=False)

    processes = [
        mp.Process(
            target=process_worker,
            args=(work_queue, output_queue),
        )
        for _ in range(num_workers)
    ]

    # Single initialization before the try block so the finally clause can
    # always read it. (Previously this was redundantly re-initialized inside
    # the try, making the outer assignment dead.)
    results: list[ProcessedFile] = []

    try:
        for w in processes:
            w.start()

        # Exactly one result (possibly None) arrives per queued file.
        for _ in tqdm(range(len(files_to_process)), desc="Processing files", unit="file"):
            result = output_queue.get()

            if result is None:
                continue

            results.append(result)

        for w in processes:
            w.join()
    finally:
        # Persist whatever finished, even if the run was interrupted.
        for result in results:
            metadata.add(
                MetadataItem(
                    id=result.output.stem,
                    text=result.transcription,
                    speaker_id=result.speaker_id,
                    file_name=result.output.name,
                    mic_id=result.mic_id,
                )
            )

        metadata.save(metadata_fpath)
|
|
|
|
|
# Script entry point: only run the pipeline when executed directly,
# not when imported (workers re-import this module under 'spawn').
if __name__ == '__main__':
    main()
|
|