"""
Batch-transcribe all audio files under one directory into another directory,
mirroring the input layout.
e.g.
Directory structure:
--pre_data_root
----SP_1
------01.wav
------02.wav
------......
----SP_2
------01.wav
------02.wav
------......
Use
python tools/whisper_asr.py --audio-dir pre_data_root/SP_1 --save-dir data/SP_1
to transcribe the first speaker.
Use
python tools/whisper_asr.py --audio-dir pre_data_root/SP_2 --save-dir data/SP_2
to transcribe the second speaker.
Note: audio is re-exported at its original sample rate unless --sample-rate is set.
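After a run, the save directory mirrors the input (sketch):
--data
----SP_1
------01.wav
------01.lab
------......
Each .lab file holds the merged transcription for the matching audio file.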
"""
from pathlib import Path

import click
from faster_whisper import WhisperModel
from loguru import logger
from pydub import AudioSegment
from tqdm import tqdm

from tools.file import AUDIO_EXTENSIONS, list_files


@click.command()
@click.option("--model-size", default="large-v3", help="Size of the Whisper model")
@click.option(
"--compute-type",
default="float16",
help="Computation Precision of the Whisper model [float16 / int8_float16 / int8]",
)
@click.option("--audio-dir", required=True, help="Directory containing audio files")
@click.option(
"--save-dir", required=True, help="Directory to save processed audio files"
)
@click.option(
    "--sample-rate",
    default=None,
    type=int,
    help="Output sample rate; defaults to the input sample rate",
)
@click.option("--device", default="cuda", help="Device to use [cuda / cpu]")
@click.option("--language", default="auto", help="Language of the transcription")
@click.option("--initial-prompt", default=None, help="Initial prompt for transcribing")
def main(
model_size,
compute_type,
audio_dir,
save_dir,
sample_rate,
device,
language,
initial_prompt,
):
logger.info("Loading / Downloading Faster Whisper model...")
model = WhisperModel(
model_size,
device=device,
compute_type=compute_type,
download_root="faster_whisper",
)
logger.info("Model loaded.")
save_path = Path(save_dir)
save_path.mkdir(parents=True, exist_ok=True)
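    # Collect every supported audio file under audio_dir, including subdirectories.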
audio_files = list_files(
path=audio_dir, extensions=AUDIO_EXTENSIONS, recursive=True
)
    for file_path in tqdm(audio_files, desc="Processing audio files"):
file_stem = file_path.stem
file_suffix = file_path.suffix
        rel_path = file_path.relative_to(audio_dir)
(save_path / rel_path.parent).mkdir(parents=True, exist_ok=True)
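        # Load with pydub so the audio can be resampled and re-exported below.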
audio = AudioSegment.from_file(file_path)
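        # beam_size=5 trades speed for accuracy; language=None enables auto-detection.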
        segments, info = model.transcribe(
            str(file_path),
            beam_size=5,
            language=None if language == "auto" else language,
            initial_prompt=initial_prompt,
        )
        logger.info(
            f"Detected language '{info.language}' "
            f"with probability {info.language_probability:.2f}"
        )
        logger.info(f"Total len(ms): {len(audio)}")
        whole_text = None
        for segment in segments:
            seg_id, start, end, text = (
                segment.id,
                segment.start,
                segment.end,
                segment.text,
            )
            logger.info(
                "Segment %03d [%.2fs -> %.2fs] %s" % (seg_id, start, end, text)
            )
            if not whole_text:
                whole_text = text
            else:
                whole_text += ", " + text
        # Skip files where Whisper produced no segments.
        if whole_text is None:
            logger.warning(f"No transcription for {file_path}, skipping")
            continue
        whole_text += "."
        # Optionally resample before export (pydub's set_frame_rate).
        if sample_rate is not None:
            audio = audio.set_frame_rate(sample_rate)
        audio_save_path = save_path / rel_path.parent / f"{file_stem}{file_suffix}"
        audio.export(audio_save_path, format=file_suffix[1:])
        logger.info(f"Exported {audio_save_path}")
transcript_save_path = save_path / rel_path.parent / f"{file_stem}.lab"
with open(
transcript_save_path,
"w",
encoding="utf-8",
) as f:
f.write(whole_text)


def _manual_test():
    """Development smoke test: transcribe one hardcoded file and export
    each segment as its own WAV. Adjust the paths before calling; the CLI
    entry point at the bottom of this file never invokes it.
    """
    audio = AudioSegment.from_wav(
        r"D:\PythonProject\原神语音中文\胡桃\vo_hutao_draw_appear.wav"
    )
    model_size = "large-v3"
    model = WhisperModel(
        model_size,
        device="cuda",
        compute_type="float16",
        download_root="faster_whisper",
    )
    segments, info = model.transcribe(
        r"D:\PythonProject\原神语音中文\胡桃\vo_hutao_draw_appear.wav",
        beam_size=5,
    )
    print(
        "Detected language '%s' with probability %f"
        % (info.language, info.language_probability)
    )
    print("Total len(ms): ", len(audio))
    for i, segment in enumerate(segments):
        print(
            "Segment %03d [%.2fs -> %.2fs] %s"
            % (i, segment.start, segment.end, segment.text)
        )
        # Slice the original audio by the segment's timestamps (pydub uses ms).
        start_ms = int(segment.start * 1000)
        end_ms = int(segment.end * 1000)
        segment_audio = audio[start_ms:end_ms]
        segment_audio.export(f"segment_{i:03d}.wav", format="wav")
        print(f"Exported segment_{i:03d}.wav")
    print("All segments have been exported.")


if __name__ == "__main__":
    main()