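# Gradio app: transcribe Japanese audio with faster-whisper.
# Upload an audio file, pick a Whisper model size, and get back
# tab-separated "start  end  text" lines with timestamps in seconds.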
import gradio as gr
import torch  # used only to detect CUDA; inference itself runs in CTranslate2
from faster_whisper import WhisperModel

model_size = 'large-v3'
def load_model(model_size):
    if torch.cuda.is_available():
        model = WhisperModel(model_size, device="cuda", compute_type="float16")
        # model = WhisperModel(model_size, device="cuda", compute_type="int8_float16")
    else:
        model = WhisperModel(model_size, device="cpu", compute_type="int8")
    return model
def speech_to_text(audio_file, _model_size):
    global model_size, model
    # Reload only when the dropdown selection differs from the cached model.
    if model_size != _model_size:
        model_size = _model_size
        model = load_model(model_size)
    # faster-whisper expects an ISO 639-1 code ('ja'), not 'japanese',
    # and its transcribe() has no `verbose` argument.
    segments, info = model.transcribe(
        audio_file,
        language='ja',
        beam_size=5,
        vad_filter=True,
        without_timestamps=False,
    )
    # transcribe() returns a lazy generator; decoding happens as we iterate.
    text = ''
    for segment in segments:
        text += f"{segment.start:.2f}\t{segment.end:.2f}\t{segment.text}\n"
    return text
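# vad_filter=True runs the Silero VAD bundled with faster-whisper to drop
# non-speech chunks before decoding; it can be tuned via the optional
# vad_parameters argument of transcribe().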
model = load_model(model_size)
gr.Interface(
    fn=speech_to_text,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath"),
        gr.Dropdown(value=model_size, choices=["tiny", "base", "small", "medium", "large", "large-v2", "large-v3"]),
    ],
    outputs="text",
).launch()
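# Quick smoke test without the UI, assuming a local file sample.wav
# (hypothetical path) containing Japanese speech:
#     print(speech_to_text("sample.wav", "large-v3"))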