# ViWhisper-CT2 / app.py
import gradio as gr
from AinaTheme import theme
from faster_whisper import WhisperModel
import torch
# Pick the device and CTranslate2 compute type: float32 on GPU, int8 quantization on CPU.
device, compute_type = ("cuda", "float32") if torch.cuda.is_available() else ("cpu", "int8")
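# CTranslate2 export of a Vietnamese Whisper large-v3-turbo fine-tune.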
MODEL_NAME = "suzii/vi-whisper-large-v3-turbo-v1-ct2"
print("Loading model ...")
model = WhisperModel(MODEL_NAME, device=device, compute_type=compute_type)
print("Loading model done.")
def transcribe(inputs):
    print("transcribe()")
    if inputs is None:
        raise gr.Error(
            "No audio file submitted! Please upload a file "
            "or record audio before sending your request."
        )
    # The temperature list is a fallback ladder: decoding retries at the next
    # value when the quality checks on the current pass fail.
    segments, _ = model.transcribe(
        inputs,
        chunk_length=30,
        task="transcribe",
        word_timestamps=True,
        repetition_penalty=1.1,
        temperature=[0.0, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0],
    )
text = ""
for segment in segments:
text += " " + segment.text.strip()
return text
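
# Minimal usage sketch (hypothetical file name):
#   print(transcribe("sample.wav"))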
def clear():
    # Reset the audio input component to empty.
    return None
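
# Gradio UI: audio input and transcript output side by side, with
# Clear / Submit controls underneath.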
with gr.Blocks(theme=theme) as demo:
    gr.Markdown(f"Device: {device}, compute type: {compute_type}")
    with gr.Row():
        with gr.Column(scale=1):
            audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio")
        with gr.Column(scale=1):
            output = gr.Textbox(label="Output", lines=8)
    with gr.Row(variant="panel"):
        clear_btn = gr.Button("Clear")
        submit_btn = gr.Button("Submit", variant="primary")
    submit_btn.click(fn=transcribe, inputs=[audio_input], outputs=[output])
    clear_btn.click(fn=clear, inputs=[], outputs=[audio_input], queue=False)
if __name__ == "__main__":
    demo.launch()