Xhaheen committed
Commit 7773805
1 Parent(s): d2ec801

Create new file

Files changed (1)
app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ import whisper
+ import gradio as gr
+
+ model = whisper.load_model("small")
+
+ def transcribe(audio):
+
+     # time.sleep(3)
+     # load audio and pad/trim it to fit 30 seconds
+     audio = whisper.load_audio(audio)
+     audio = whisper.pad_or_trim(audio)
+
+     # make log-Mel spectrogram and move to the same device as the model
+     mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+     # detect the spoken language
+     _, probs = model.detect_language(mel)
+     print(f"Detected language: {max(probs, key=probs.get)}")
+
+     # decode the audio
+     options = whisper.DecodingOptions(fp16=False)
+     result = whisper.decode(model, mel, options)
+     return result.text
+
+
+
+ gr.Interface(
+     title='OpenAI Whisper ASR Gradio Web UI',
+     fn=transcribe,
+     inputs=[
+         gr.inputs.Audio(source="microphone", type="filepath")
+     ],
+     outputs=[
+         "textbox"
+     ],
+     live=True).launch()
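
As a quick local sanity check, the same transcription steps can be exercised outside the Gradio UI. The snippet below is a minimal sketch, assuming the openai-whisper package and ffmpeg are installed and that "sample.wav" is a placeholder path to any short audio clip (it is not part of the original commit):

    import whisper

    # Standalone check of the pipeline used in app.py, without launching the web UI.
    # "sample.wav" is a placeholder path; replace it with a real audio file.
    model = whisper.load_model("small")
    audio = whisper.pad_or_trim(whisper.load_audio("sample.wav"))
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    result = whisper.decode(model, mel, whisper.DecodingOptions(fp16=False))
    print(result.text)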