gstaff committed on
Commit
6acf91d
•
1 Parent(s): 39cf431

Clear audio recording after submission, switch to distil-whisper model from transformers for speech to text.

Files changed (2)
  1. app.py +28 -7
  2. requirements.txt +2 -1
app.py CHANGED
@@ -2,7 +2,8 @@ import os
 from pathlib import Path
 import gradio as gr
 import re
-import whisper
+import torch
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 import requests
 
 HF_TOKEN = os.getenv("HF_TOKEN")
@@ -10,6 +11,29 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 
+def init_speech_to_text_model():
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+    model_id = "distil-whisper/distil-large-v2"
+    model = AutoModelForSpeechSeq2Seq.from_pretrained(
+        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
+    )
+    model.to(device)
+    processor = AutoProcessor.from_pretrained(model_id)
+    return pipeline(
+        "automatic-speech-recognition",
+        model=model,
+        tokenizer=processor.tokenizer,
+        feature_extractor=processor.feature_extractor,
+        max_new_tokens=128,
+        torch_dtype=torch_dtype,
+        device=device,
+    )
+
+
+whisper_pipe = init_speech_to_text_model()
+
 code_pattern = r'```python\n(.*?)```'
 
 starting_app_code = """import gradio as gr
@@ -140,12 +164,9 @@ def generate_text(code, prompt):
     return assistant_reply, new_code
 
 
-model = whisper.load_model('medium')
-
-
 def transcribe(audio):
-    result = model.transcribe(audio, language='en', verbose=False)
-    return result["text"]
+    result = whisper_pipe(audio)
+    return result["text"], None
 
 
 def copy_notify(code):
@@ -173,7 +194,7 @@ with gr.Blocks() as demo:
     update_btn = gr.Button("Update App", variant="primary")
     update_btn.click(None, inputs=code_area, outputs=None, _js=update_iframe_js)
     in_prompt.submit(generate_text, [code_area, in_prompt], [out_text, code_area]).then(None, inputs=code_area, outputs=None, _js=update_iframe_js)
-    in_audio.stop_recording(transcribe, [in_audio], [in_prompt]).then(generate_text, [code_area, in_prompt], [out_text, code_area]).then(None, inputs=code_area, outputs=None, _js=update_iframe_js)
+    in_audio.stop_recording(transcribe, [in_audio], [in_prompt, in_audio]).then(generate_text, [code_area, in_prompt], [out_text, code_area]).then(None, inputs=code_area, outputs=None, _js=update_iframe_js)
     with gr.Row():
         with gr.Column():
             gr.Markdown("## 3. Export your app to share!")
requirements.txt CHANGED
@@ -1 +1,2 @@
-openai-whisper
+torch
+transformers
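
With openai-whisper dropped, torch and transformers are the only speech-to-text dependencies left. A quick sanity-check sketch of the distil-whisper pipeline on its own (the audio path is a placeholder, not a file from this repo; the transformers ASR pipeline relies on ffmpeg to decode audio files):

import torch
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Same style of pipeline as init_speech_to_text_model(), built from the model id directly.
pipe = pipeline(
    "automatic-speech-recognition",
    model="distil-whisper/distil-large-v2",
    torch_dtype=torch_dtype,
    device=device,
)

result = pipe("sample.wav")  # placeholder path to any local recording
print(result["text"])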