# Hub upload metadata (scrape residue, kept as a comment so the file parses):
# fawadrashid's picture — Upload 3 files — c4f5e53 verified
import gradio as gr
from transformers import pipeline
import numpy as np
import librosa
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
def convert_to_mono(y, sr):
    """Collapse a multi-channel signal to a single mono channel.

    A signal that is already one-dimensional is returned unchanged (same
    object, no copy). `sr` is accepted for interface symmetry but unused.
    """
    if y.ndim == 1:
        # Already mono — nothing to do.
        return y
    # librosa.to_mono expects (channels, samples); gradio delivers
    # (samples, channels), hence the transpose before mixing down.
    return librosa.to_mono(y.T)
def transcribe(stream, new_chunk):
    """Streaming ASR callback: append the incoming chunk to the running
    buffer and re-transcribe the whole buffer.

    Args:
        stream: float32 mono samples accumulated so far, or None on the
            first call (gradio "state" starts as None).
        new_chunk: (sample_rate, samples) tuple from the streaming
            gr.Audio component.

    Returns:
        (updated stream, transcript text); the stream is fed back through
        the "state" output so it persists across calls.
    """
    sr, y = new_chunk
    y = y.astype(np.float32)
    # Peak-normalize to [-1, 1]. Guard against a silent (all-zero) or empty
    # chunk: the original unconditional divide produced NaNs that were
    # concatenated into the persistent buffer, breaking all later output.
    peak = np.max(np.abs(y)) if y.size else 0.0
    if peak > 0:
        y /= peak
    # Convert audio to mono if it is stereo
    y = convert_to_mono(y, sr)
    stream = y if stream is None else np.concatenate([stream, y])
    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]
# Wire the streaming microphone input and the persistent audio buffer
# ("state") into the transcribe callback; live=True re-runs it per chunk.
demo = gr.Interface(
    fn=transcribe,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True)],
    outputs=["state", "text"],
    live=True,
)
demo.launch()