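# app.py: minimal Gradio speech-to-text demo built on faster-whisper.
# Accepts an uploaded audio file, transcribes it with the selected Whisper
# model, and returns the transcript as text.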
import gradio as gr
from faster_whisper import WhisperModel
model_size = 'aka7774/whisper-large-v3-ct2'
# Run on GPU with FP16:
# model = WhisperModel(model_size, device="cuda", compute_type="float16")
# or on GPU with INT8:
# model = WhisperModel(model_size, device="cuda", compute_type="int8_float16")
# Run on CPU with INT8:
model = WhisperModel(model_size, device="cpu", compute_type="int8")
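# If unsure which quantizations the host supports, ctranslate2 (the backend
# faster-whisper builds on) exposes get_supported_compute_types("cpu") to
# list the valid compute_type values.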

def speech_to_text(audio_file, _model_size):
    global model_size, model
    # Reload the model only when the user picks a different size in the dropdown
    if model_size != _model_size:
        model_size = _model_size
        # Match the initial load: float16 is generally unsupported on CPU,
        # so stay with INT8 here instead of compute_type="float16"
        model = WhisperModel(model_size, device="cpu", compute_type="int8")
    # transcribe() returns a lazy generator of segments plus audio metadata
    segments, info = model.transcribe(audio_file, beam_size=5)
    return "".join(segment.text for segment in segments)

gr.Interface(
    fn=speech_to_text,
    inputs=[
        gr.Audio(source="upload", type="filepath"),
        gr.Dropdown(
            value=model_size,
            choices=["tiny", "base", "small", "medium", "large", "large-v2", "large-v3", "aka7774/whisper-large-v3-ct2"],
        ),
    ],
    outputs="text",
).launch()
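
# A minimal sketch of querying this interface programmatically with
# gradio_client, assuming the Space is public and gradio_client is installed;
# the Space id below is hypothetical:
#
#   from gradio_client import Client
#   client = Client("user/space-name")  # hypothetical Space id
#   transcript = client.predict("sample.wav", "large-v3", api_name="/predict")
#   print(transcript)
#
# Note: older gradio_client versions accept a plain file path for audio inputs;
# newer ones expect gradio_client.handle_file("sample.wav").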