GAS17 committed on
Commit
30dc1e1
1 Parent(s): 3ef67b0

Update app.py

Files changed (1)
  1. app.py +76 -48
app.py CHANGED
@@ -1,55 +1,83 @@
  import gradio as gr
- from pytube import YouTube
  import whisper

- # define function for transcription
- def whisper_transcript(model_size, url, audio_file):
-     if url:
-         link = YouTube(url)
-         source = link.streams.filter(only_audio=True)[0].download(filename="audio.mp4")

      else:
-         source = audio_file

-     if model_size.endswith(".en"):
-         language = "english"

-     else:
-         language = None
-
-     options = whisper.DecodingOptions(without_timestamps=True)
-
-     loaded_model = whisper.load_model(model_size)
-     transcript = loaded_model.transcribe(source, language=language)
-
-     return transcript["text"]
-
- # define Gradio app interface
- gradio_ui = gr.Interface(
-     fn=whisper_transcript,
-     title="Transcribe multi-lingual audio clips with Whisper",
-     description="**How to use**: Select a model, paste in a Youtube link or upload an audio clip, then click submit. If your clip is **100% in English, select models ending in ‘.en’**. If the clip is in other languages, or a mix of languages, select models without ‘.en’",
-     article="**Note**: The larger the model size selected or the longer the audio clip, the more time it would take to process the transcript.",
-     inputs=[
-         gr.Dropdown(
-             label="Select Model",
-             choices=[
-                 "tiny.en",
-                 "base.en",
-                 "small.en",
-                 "medium.en",
-                 "tiny",
-                 "base",
-                 "small",
-                 "medium",
-                 "large",
-             ],
-             value="base",
-         ),
-         gr.Textbox(label="Paste YouTube link here"),
-         gr.Audio(label="Upload Audio File", source="upload", type="filepath"),
-     ],
-     outputs=gr.outputs.Textbox(label="Whisper Transcript"),
- )
-
- gradio_ui.queue().launch()
+ import os
+ os.system("pip install git+https://github.com/openai/whisper.git")
+
+
  import gradio as gr
  import whisper
+ import io
+ import os
+ import numpy as np
+ from datetime import datetime

+ import assets

+ def sendToWhisper(audio_record, audio_upload, task, models_selected, language_toggle, language_selected, without_timestamps):
+     results = []
+
+     audio = None
+     if audio_record is not None:
+         audio = audio_record
+     elif audio_upload is not None:
+         audio = audio_upload
      else:
+         return [["Invalid input"]*5]

+     audio = whisper.load_audio(audio)
+     audio = whisper.pad_or_trim(audio)
+
+     for model_name in models_selected:
+         start = datetime.now()
+         model = whisper.load_model(model_name)
+         mel = whisper.log_mel_spectrogram(audio).to(model.device)
+         options = whisper.DecodingOptions(fp16 = False, without_timestamps=without_timestamps, task=task)
+         if language_toggle:
+             options = whisper.DecodingOptions(fp16 = False, without_timestamps=without_timestamps, task=task, language=language_selected)
+         language = ""
+         prob = 0
+         if model_name in assets.lang_detect:
+             _, probs = model.detect_language(mel)
+             language = max(probs, key=probs.get)
+             prob = probs[language]
+         else:
+             language = "en"
+             options = whisper.DecodingOptions(fp16 = False, without_timestamps=without_timestamps, task=task, language="en")
+         output_text = whisper.decode(model, mel, options)
+         results.append([model_name, output_text.text, language, str(prob), str((datetime.now() - start).total_seconds())])
+     return results
+
+ avail_models = whisper.available_models()
+
+
+ with gr.Blocks(css=assets.css) as demo:
+     gr.Markdown("This is a demo to use Open AI's Speech to Text (ASR) Model: Whisper. Learn more about the models here on [Github](https://github.com/openai/whisper/search?q=DecodingOptions&type=) FYI: The larger models take a lot longer to transcribe the text :)")
+     gr.Markdown("Here are sample audio files to try out: [Sample Audio](https://drive.google.com/drive/folders/1qYek06ZVeKr9f5Jf35eqi-9CnjNIp98u?usp=sharing)")
+     gr.Markdown("Built by:[@davidtsong](https://twitter.com/davidtsong)")
+
+     # with gr.Row():
+     with gr.Column():
+
+         # with gr.Column():
+         gr.Markdown("## Input")
+
+         with gr.Row():
+             audio_record = gr.Audio(source="microphone", label="Audio to transcribe", type="filepath", elem_id="audio_inputs")
+             audio_upload = gr.Audio(source="upload", type="filepath", interactive=True, elem_id="audio_inputs")
+
+         models_selected = gr.CheckboxGroup(avail_models, label="Models to use")
+         with gr.Accordion("Settings", open=False):
+             task = gr.Dropdown(["transcribe", "translate"], label="Task", value="transcribe")
+             language_toggle = gr.Dropdown(["Automatic", "Manual"], label="Language Selection", value="Automatic")
+             language_selected = gr.Dropdown(list(assets.LANGUAGES.keys()), label="Language")
+             without_timestamps = gr.Checkbox(label="Without timestamps", value=True)
+         submit = gr.Button(label="Run")
+
+     # with gr.Row():
+     # with gr.Column():
+     gr.Markdown("## Output")
+     output = gr.Dataframe(headers=["Model", "Text", "Language", "Language Confidence", "Time(s)"], label="Results", wrap=True)
+
+     submit.click(fn=sendToWhisper, inputs=[audio_record, audio_upload, task, models_selected, language_toggle, language_selected, without_timestamps], outputs=output)
+
+ demo.launch()
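
For reference, the decoding path that the new sendToWhisper function drives can be exercised outside Gradio with a few lines of the openai-whisper API. The sketch below is illustrative only and is not part of the commit: the file name "sample.wav" and the choice of the base model are assumptions, but the load_audio → pad_or_trim → log_mel_spectrogram → detect_language → decode sequence mirrors what the updated app.py does per selected model.

```python
# Minimal sketch of the Whisper decoding flow used by the updated app.py.
# Assumes `pip install openai-whisper` and a local clip at "sample.wav" (hypothetical path).
import whisper

model = whisper.load_model("base")                             # the app loads each model the user ticks
audio = whisper.pad_or_trim(whisper.load_audio("sample.wav"))  # fixed 30-second window expected by decode()
mel = whisper.log_mel_spectrogram(audio).to(model.device)

_, probs = model.detect_language(mel)                          # dict mapping language code -> probability
language = max(probs, key=probs.get)

options = whisper.DecodingOptions(fp16=False, without_timestamps=True, task="transcribe", language=language)
result = whisper.decode(model, mel, options)
print(language, probs[language], result.text)
```

Passing fp16=False, as the app does, keeps decoding in float32 so it also runs on CPU-only hardware, and pad_or_trim means only the first 30 seconds of the clip are decoded per call.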