datasciencedojo committed on
Commit
13b9ce8
1 Parent(s): c572992

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -0
app.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ import gradio as gr
4
+ import pytube as pt
5
+ from transformers import pipeline
6
+ from huggingface_hub import model_info
7
+
8
# Whisper checkpoint used for transcription.
MODEL_NAME = "openai/whisper-small"  # this always needs to stay in line 8 :D sorry for the hackiness
lang = "en"

# Prefer the first CUDA device when one is available, otherwise run on CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = "cpu"

# Build the ASR pipeline; 30-second chunking lets it process long audio files.
pipe = pipeline(
    "automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

# Pin the decoder prompt so the model always transcribes in `lang`
# instead of auto-detecting the spoken language.
decoder_prompt_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
pipe.model.config.forced_decoder_ids = decoder_prompt_ids
20
+
21
def transcribe(microphone, file_upload):
    """Transcribe audio from the microphone or from an uploaded file.

    Both arguments are Gradio filepaths (or ``None`` when that input was
    left unused). When both are supplied, the microphone recording wins
    and a warning is prepended to the transcript.

    Returns the transcript string (possibly prefixed with a warning), or
    an error message when neither source was provided.
    """
    # Guard clause: nothing to transcribe at all.
    if microphone is None and file_upload is None:
        return "ERROR: You have to either use the microphone or upload an audio file"

    warning = ""
    if microphone is not None and file_upload is not None:
        warning = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    # Microphone takes precedence over the upload.
    if microphone is not None:
        audio_path = microphone
    else:
        audio_path = file_upload

    return warning + pipe(audio_path)["text"]
37
+
38
demo = gr.Blocks()  # NOTE(review): created but never used below — the UI is built via gr.Interface; confirm before removing
39
+
40
# Sample clips offered in the UI; the first is an mp3, the rest are wavs.
# Each entry is a one-element row matching the interface's audio input.
examples = [
    ["TestAudio1.mp3"],
    ["TestAudio2.wav"],
    ["TestAudio3.wav"],
    ["TestAudio4.wav"],
    ["TestAudio5.wav"],
    ["TestAudio6.wav"],
    ["TestAudio7.wav"],
    ["TestAudio8.wav"],
    ["TestAudio9.wav"],
    ["TestAudio10.wav"],
]
43
+
44
# Two optional audio sources feed the same transcribe() callback; the
# callback itself resolves which one to use.
# NOTE(review): gr.inputs.*, layout=, theme= and enable_queue= are legacy
# (pre-4.x) Gradio API — confirm the pinned gradio version before modernizing.
mic_audio = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
upload_audio = gr.inputs.Audio(source="upload", type="filepath", optional=True)

app = gr.Interface(
    fn=transcribe,
    inputs=[mic_audio, upload_audio],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    allow_flagging="never",
    examples=examples,
)
mf_transcribe = app.launch(enable_queue=True)
56
+
57
+ # Uses the openai/whisper-small model via the transformers ASR pipeline.