import gradio as gr
import whisper

# Load the English-only base Whisper model once at startup.
model = whisper.load_model("base.en")

def fun(audio):
    # `audio` is the path to the recorded clip (type="filepath" below);
    # Whisper loads and resamples the file itself before transcribing.
    result = model.transcribe(audio)
    return result["text"]

# Assumes the Gradio 3.x API (gr.Audio with `source=`); newer releases
# take gr.Audio(sources=["microphone"], ...) instead.
gr.Interface(
    title="Testing Whisper",
    fn=fun,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="textbox",
    live=True,  # re-run transcription automatically when a new recording finishes
).launch()