# vctk/process.py
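"""Preprocess the VCTK corpus into a flat `dataset/` directory.

Each FLAC recording under `wav48_silence_trimmed/` is converted to WAV,
trimmed of leading/trailing silence with a VAD model, padded with a short
silence buffer, and written out alongside its transcription (looked up
under `txt/`) into `dataset/metadata.csv`. Work is fanned out across CPU
cores with `multiprocessing`. Run from the root of an extracted VCTK
release that uses this directory layout (presumably the 0.92
`wav48_silence_trimmed` distribution).
"""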
from dataclasses import dataclass
from io import BytesIO
import multiprocessing as mp
import os
from pathlib import Path
import queue

import pydub
# import noisereduce as nr
# import soundfile as sf  # only needed by the disabled noise_reduce step
from tqdm import tqdm

from metadata import MetadataItem, LockedMetadata
from vad import remove_silence, get_vad_model_and_utils


@dataclass
class ProcessedFile:
    output: Path
    transcription: str
    speaker_id: str
    mic_id: str


@dataclass
class FileToProcess:
    input: Path
    input_txt: Path


# def noise_reduce(
# input: Path,
# ) -> Path:
# waveform, sample_rate = sf.read(input)
# reduced_noise = nr.reduce_noise(y=waveform, sr=sample_rate, stationary=True, prop_decrease=0.8)
# sf.write(input, reduced_noise, sample_rate)
# return input


def pad_silence(
input: Path | BytesIO,
pad_length: int,
format: str = 'wav',
) -> Path | BytesIO:
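    """Add `pad_length` milliseconds of silence to both ends of the audio,
    writing the result back to `input` (a path or in-memory buffer).
    """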
audio = pydub.AudioSegment.from_file(input, format=format)
    # Pad the start and end with `pad_length` ms of silence
    silence = pydub.AudioSegment.silent(duration=pad_length)
    padded: pydub.AudioSegment = silence + audio + silence
    padded.export(input, format=format)
return input


def process_worker(
work: mp.Queue,
output: mp.Queue,
) -> None:
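    """Drain `work` until it is empty, pushing one result per item onto `output`.

    Each spawned worker loads its own CPU-only VAD model, since a loaded
    model cannot be shared across the `spawn` process boundary.
    """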
vad_models_and_utils = get_vad_model_and_utils(use_cuda=False, use_onnx=False)
    # Rely on the Empty timeout to detect exhaustion: Queue.qsize() is racy
    # and raises NotImplementedError on some platforms (notably macOS).
    while True:
        try:
            nitem = work.get(timeout=1)
        except queue.Empty:
            break
        # Always emit a result, even on failure, so the consumer's per-file
        # progress count stays in sync and the main loop cannot hang.
        try:
            result = process_file(
                vad_models_and_utils=vad_models_and_utils,
                inp=nitem.input,
                inp_txt=nitem.input_txt,
                output_directory=Path('dataset'),
                pad_length=25,
            )
        except Exception as exc:
            print(f"Failed to process {nitem.input}: {exc}")
            result = None

        output.put(result)
print(f"Worker {mp.current_process().name} finished processing.")


def process_file(
vad_models_and_utils: tuple,
inp: Path,
inp_txt: Path,
output_directory: Path,
pad_length: int = 25,
) -> ProcessedFile | None:
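    """Convert a single FLAC recording into a silence-trimmed, padded WAV.

    Returns None if either the audio file or its transcription is missing.
    """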
output_fpath = output_directory / f"{inp.stem}.wav"
if not inp.exists():
return None
if not inp_txt.exists():
return None
transcription = (
inp_txt
.read_text()
.strip()
)
speaker_id = inp.parent.name
mic_id = inp.stem.split('_')[-1] # Assuming the mic_id is the last part of the stem
audio_mem = BytesIO()
# Convert file to wav
audio: pydub.AudioSegment = pydub.AudioSegment.from_file(inp)
audio.export(audio_mem, format='wav')
audio_mem.seek(0)
silent_audio_mem = BytesIO()
# Noise Reduction
# output_fpath = noise_reduce(output_fpath)
    # Trim leading and trailing silence with the VAD model
    remove_silence(
        vad_models_and_utils,
        audio_path=audio_mem,
        out_path=silent_audio_mem,
        trim_just_beginning_and_end=True,
        format='wav',
    )
silent_audio_mem.seek(0)
# Pad silence
output_audio = pad_silence(silent_audio_mem, pad_length)
assert isinstance(output_audio, BytesIO), "Output audio should be a BytesIO object"
# Actually save the processed audio to the output path
with open(output_fpath, 'wb') as f:
f.write(output_audio.getbuffer())
return ProcessedFile(
output=output_fpath,
transcription=transcription,
speaker_id=speaker_id,
mic_id=mic_id,
)


def main() -> None:
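    """Enumerate unprocessed VCTK recordings and fan them out to workers."""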
txt = Path('txt')
wav = Path('wav48_silence_trimmed')
output_directory = Path('dataset')
metadata_fpath = output_directory / 'metadata.csv'
num_workers = os.cpu_count() or 1
    # num_workers = int(num_workers * 1.5)  # Oversubscribe to 150% of available CPU cores
mp.set_start_method("spawn", force=True)
print(f"Using {num_workers} workers for processing")
if not txt.exists() or not wav.exists():
raise ValueError("Input directories do not exist")
    output_directory.mkdir(parents=True, exist_ok=True)
    # Metadata columns: id,text,speaker_id,file_name,mic_id
metadata = LockedMetadata(key_field='id')
if metadata_fpath.exists():
metadata = LockedMetadata.load(metadata_fpath, key_field='id')
files_to_process: list[FileToProcess] = []
files = list(wav.glob('**/*.flac'))
    # The file stem maps to the metadata id; skip anything already recorded
    # so the script can resume after an interruption.
for file in files:
stem = file.stem
if stem in metadata:
continue
text = stem
# Remove the _mic1 or _mic2 suffix from the stem
if stem.endswith('_mic1') or stem.endswith('_mic2'):
text = stem[:-5]
# get the directory of the file
directory = file.parent.name
input_txt = txt / directory / f"{text}.txt"
files_to_process.append(
FileToProcess(
input=file,
input_txt=input_txt,
)
)
work_queue: mp.Queue[FileToProcess] = mp.Queue()
output_queue: mp.Queue[ProcessedFile | None] = mp.Queue()
# fill the work queue with files to process
for file in files_to_process:
work_queue.put(file)
    # Download the VAD model once up front so each spawned worker finds it
    # already cached instead of racing to fetch it.
    get_vad_model_and_utils(use_cuda=False, use_onnx=False)
processes = [
mp.Process(
target=process_worker,
args=(work_queue, output_queue),
)
for _ in range(num_workers)
]
    # Process each file, collecting results as workers complete them.
    results: list[ProcessedFile] = []
    try:
for w in processes:
w.start()
for _ in tqdm(range(len(files_to_process)), desc="Processing files", unit="file"):
result = output_queue.get()
if result is None:
continue
results.append(result)
# Wait for workers to finish
for w in processes:
w.join()
finally:
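        # Save whatever completed to metadata.csv even if a worker or the
        # main loop failed, so a rerun can resume where this one stopped.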
for result in results:
metadata.add(
MetadataItem(
id=result.output.stem,
text=result.transcription,
speaker_id=result.speaker_id,
file_name=result.output.name,
mic_id=result.mic_id,
)
)
metadata.save(metadata_fpath)


if __name__ == '__main__':
main()