import numpy as np
import gradio as gr
from transformers import pipeline

pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base",
)


def transcribe(audio):
    sr, y = audio
    # Mix down to mono if the browser delivered stereo audio,
    # since the pipeline expects a 1-D sample array
    if y.ndim > 1:
        y = y.mean(axis=1)
    # Convert the sample array to 32-bit NumPy floats
    y = y.astype(np.float32)
    # Normalize to [-1, 1]; guard against all-zero (silent) input
    max_abs = np.max(np.abs(y))
    if max_abs > 0:
        y /= max_abs
    return pipe({"sampling_rate": sr, "raw": y})["text"]


demo = gr.Interface(
    transcribe,
    gr.Audio(sources=["microphone"]),
    "text",
)

demo.launch(share=True)
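# A quick way to sanity-check transcribe() without the UI (a sketch, not
# part of the original): comment out demo.launch(...) above, since launch()
# blocks the script, and call the function directly on a NumPy buffer, e.g.
#
#     sr = 16000
#     samples = (np.random.randn(sr) * 0.01 * 32767).astype(np.int16)
#     print(transcribe((sr, samples)))
#
# Whisper will transcribe near-silence to an empty or arbitrary string; the
# point is only to confirm the preprocessing and pipeline call run end to end.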