azamat committed on
Commit
632d4bf
1 Parent(s): 2a0be35
Files changed (2) hide show
  1. app.py +36 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import whisper
import gradio as gr

# Load the smallest Whisper checkpoint once at startup; this single model
# instance serves every transcription request from the Gradio app.
model = whisper.load_model("tiny")


def transcribe(audio):
    """Transcribe a recorded audio clip to English text.

    Parameters
    ----------
    audio : str
        Filepath to the recording (Gradio supplies it as type="filepath").

    Returns
    -------
    str
        The decoded transcription.
    """
    # Load the recording and pad/trim it to Whisper's fixed 30-second window.
    waveform = whisper.pad_or_trim(whisper.load_audio(audio))

    # Build the log-Mel spectrogram and move it to the model's device.
    mel = whisper.log_mel_spectrogram(waveform).to(model.device)

    # Detect the spoken language — logged for visibility only, since
    # decoding below is explicitly forced to English.
    _, probs = model.detect_language(mel)
    detected = max(probs, key=probs.get)
    print(f"Detected language: {detected}")

    # Decode in fp32 (fp16=False keeps CPU inference working), English output.
    decode_options = whisper.DecodingOptions(fp16=False, language="en")
    return whisper.decode(model, mel, decode_options).text
24
+
25
+
26
+
27
# Wire the transcriber into a simple microphone-in / textbox-out web UI
# and start serving it.
demo = gr.Interface(
    title='EN Whisper ASR With Auto Punctuation',
    fn=transcribe,
    inputs=[gr.inputs.Audio(source="microphone", type="filepath")],
    outputs=["textbox"],
)
demo.launch()
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ git+https://github.com/openai/whisper.git