fffiloni committed on
Commit 4b4ce6b • 1 Parent(s): ac77c22

Update app.py

Files changed (1):
  app.py +348 -40
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import torch
 import whisper
+
 from PIL import Image
 
 import os
@@ -8,6 +9,12 @@ MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')
 
 from diffusers import StableDiffusionPipeline
 
+### ————————————————————————————————————————
+
+title="Whisper to Stable Diffusion"
+
+### ————————————————————————————————————————
+
 whisper_model = whisper.load_model("small")
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -15,55 +22,356 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=MY_SECRET_TOKEN)
 pipe.to(device)
 
-def get_transcribe(audio):
-    audio = whisper.load_audio(audio)
-    audio = whisper.pad_or_trim(audio)
-
-    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
-
-    _, probs = whisper_model.detect_language(mel)
-
-    options = whisper.DecodingOptions(task="translate", fp16 = False)
-    result = whisper.decode(whisper_model, mel, options)
-
-    print(result)
-    print(result.text)
-    return result.text
-
-def get_images(audio):
-    prompt = get_transcribe(audio)
-    #image = pipe(prompt, init_image=init_image)["sample"][0]
-    images_list = pipe([prompt] * 2)
-    images = []
-    safe_image = Image.open(r"unsafe.png")
-    for i, image in enumerate(images_list["sample"]):
-        if(images_list["nsfw_content_detected"][i]):
-            images.append(safe_image)
-        else:
-            images.append(image)
-
-    return prompt, images
-#inputs
-audio = gr.Audio(label="Input Audio of an image description", show_label=True, source="microphone", type="filepath")
-#outputs
-translated_prompt = gr.Textbox(label="Translated audio", lines=6)
-gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[1], height="auto")
-title="Whisper to Stable Diffusion"
-description="""
-<p style='text-align: center;'>
-This demo is running on CPU 🐢. Offered by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a> • <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.whisper-to-stable-diffusion' style='display: inline-block' /><br />
-Record an audio description of an image, stop recording, then hit the Submit button to get 2 images from Stable Diffusion.<br />
-Your audio will be translated to English through OpenAI's Whisper, then sent as a prompt to Stable Diffusion.
-Try it in French! ;)<br />
-—
-</p>
-"""
-
-article="""
-<p style='text-align: center;'>—<br />
-Whisper is a general-purpose speech recognition model. <br />
-It is trained on a large dataset of diverse audio and is also a multi-task model that can perform<br />multilingual speech recognition as well as speech translation and language identification.<br />
-Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a>
-</p>
-"""
-gr.Interface(fn=get_images, inputs=audio, outputs=[translated_prompt, gallery], title=title, description=description, article=article).queue(max_size=1000).launch(enable_queue=True)
+### ————————————————————————————————————————
+
+def magic_whisper_to_sd(audio, guidance_scale, nb_iterations, seed):
+
+    whisper_results = translate(audio)
+    prompt = whisper_results[2]
+    images = diffuse(prompt, guidance_scale, nb_iterations, seed)
+
+    return whisper_results[0], whisper_results[1], whisper_results[2], images
+
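+# diffuse() sends the English prompt to the Stable Diffusion pipeline: two images
+# per prompt, seeded for reproducibility; outputs flagged by the safety checker
+# are swapped for a placeholder image. Note: the dict-style access below
+# ("sample", "nsfw_content_detected") matches the early diffusers API this Space
+# appears to pin; newer diffusers releases return an output object with .images.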
+def diffuse(prompt, guidance_scale, nb_iterations, seed):
+
+    generator = torch.Generator(device=device).manual_seed(int(seed))
+
+    print("Sending prompt to Stable Diffusion ...")
+    print("prompt: " + prompt)
+    print("guidance scale: " + str(guidance_scale))
+    print("inference steps: " + str(nb_iterations))
+    print("seed: " + str(seed))
+
+    images_list = pipe(
+        [prompt] * 2,
+        guidance_scale=guidance_scale,
+        num_inference_steps=nb_iterations,
+        generator=generator
+    )
+
+    images = []
+    safe_image = Image.open(r"unsafe.png")
+
+    for i, image in enumerate(images_list["sample"]):
+        if images_list["nsfw_content_detected"][i]:
+            images.append(safe_image)
+        else:
+            images.append(image)
+
+    print("Stable Diffusion has finished")
+
+    return images
+
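+# translate() is the Whisper front-end: it decodes the same mel spectrogram twice,
+# once with task="transcribe" (text in the detected spoken language) and once with
+# task="translate" (English text, used downstream as the diffusion prompt).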
+def translate(audio):
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)
+
+    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
+
+    _, probs = whisper_model.detect_language(mel)
+
+    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
+    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
+
+    transcription = whisper.decode(whisper_model, mel, transcript_options)
+    translation = whisper.decode(whisper_model, mel, translate_options)
+
+    print("language spoken: " + transcription.language)
+    print(transcription.text)
+    print(translation.text)
+
+    return transcription.language, transcription.text, translation.text
+
+### ————————————————————————————————————————
+
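+# Custom CSS for the Blocks UI below; selectors such as #record_btn, #spoken_lang,
+# #diffuse_btn, #notice and #about target components created with matching elem_id.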
+css = """
+.container {
+    max-width: 1280px;
+    margin: auto;
+    padding-top: 1.5rem;
+}
+a {
+    text-decoration: underline;
+}
+h1 {
+    font-weight: 900;
+    text-align: center;
+    font-size: 2em;
+    margin-bottom: 1em;
+}
+#w2sd_container {
+    margin-top: 20px;
+}
+.footer {
+    margin-bottom: 45px;
+    margin-top: 35px;
+    text-align: center;
+    border-bottom: 1px solid #e5e5e5;
+}
+.footer>p {
+    font-size: .8rem;
+    display: inline-block;
+    padding: 0 10px;
+    transform: translateY(10px);
+    background: white;
+}
+.dark .footer {
+    border-color: #303030;
+}
+.dark .footer>p {
+    background: #0b0f19;
+}
+.tabitem {
+    border-bottom-left-radius: 10px;
+    border-bottom-right-radius: 10px;
+}
+#record_tab, #upload_tab {
+    font-size: 1.2em;
+}
+audio {
+    margin-bottom: 10px;
+}
+div#record_btn > .mt-6 {
+    margin-top: 0 !important;
+}
+div#record_btn > .mt-6 button {
+    font-size: 2em;
+    width: 100%;
+    padding: 20px;
+    height: 160px;
+}
+div#spoken_lang textarea {
+    font-size: 4em;
+    line-height: 1em;
+    text-align: center;
+}
+div#transcripted {
+    flex: 4;
+}
+div#translated textarea {
+    font-size: 1.5em;
+    line-height: 1.25em;
+}
+#sd_settings {
+    margin-bottom: 20px;
+}
+#diffuse_btn {
+    font-size: 2em;
+    padding: 20px;
+}
+#notice {
+    padding: 20px 14px 10px;
+    display: flex;
+    align-content: space-evenly;
+    gap: 20px;
+    line-height: 1em;
+    font-size: .8em;
+    border: 1px solid #374151;
+    border-radius: 10px;
+}
+#about {
+    padding: 20px;
+}
+#notice > div {
+    flex: 1;
+}
+"""
+
+### ————————————————————————————————————————
+
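+# Two-column Blocks layout: the left column records or uploads audio, exposes the
+# Stable Diffusion settings, and lets the user check and correct Whisper's output;
+# the right column displays the generated gallery.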
+with gr.Blocks(css=css) as demo:
+    gr.HTML('''
+        <h1>
+        Whisper to Stable Diffusion
+        </h1>
+        <p style='text-align: center;'>
+        Ask Stable Diffusion for images by speaking (or singing 🤗) in your native language! Try it in French 😉
+        </p>
+
+        <p style='text-align: center;'>
+        This demo is running on 🐢 CPU. Offered by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a> • <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.whisper-to-stable-diffusion' style='display: inline-block' /><br />
+        —
+        </p>
+    ''')
+    with gr.Row(elem_id="w2sd_container"):
+        with gr.Column():
+
+            gr.Markdown(
+                """
+                ## 1. Record audio or Upload an audio file:
+                """
+            )
+
+            with gr.Tab(label="Record audio input", elem_id="record_tab"):
+                with gr.Column():
+                    record_input = gr.Audio(
+                        source="microphone",
+                        type="filepath",
+                        show_label=False,
+                        elem_id="record_btn"
+                    )
+                    with gr.Row():
+                        audio_r_translate = gr.Button("Check Whisper first? 👍")
+                        audio_r_direct_sd = gr.Button("Magic Whisper —› SD right now! 🤠")
+
+            with gr.Tab(label="Upload audio input", elem_id="upload_tab"):
+                with gr.Column():
+                    upload_input = gr.Audio(source="upload", type="filepath", show_label=False)
+                    with gr.Row():
+                        audio_u_translate = gr.Button("Check Whisper first?")
+                        audio_u_direct_sd = gr.Button("Magic Whisper —› SD right now!")
+
+            with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings"):
+                with gr.Row():
+                    guidance_scale = gr.Slider(2, 15, value=7, label='Guidance Scale')
+                    nb_iterations = gr.Slider(10, 50, value=25, step=1, label='Number of Iterations')
+                    seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
+
+            gr.Markdown(
+                """
+                ## 2. Check Whisper output, correct it if necessary:
+                """
+            )
+
+            with gr.Row():
+                transcripted_output = gr.Textbox(
+                    label="Transcription in your detected spoken language",
+                    lines=3,
+                    elem_id="transcripted"
+                )
+                language_detected_output = gr.Textbox(label="Native language", elem_id="spoken_lang", lines=3)
+
+            with gr.Column():
+                translated_output = gr.Textbox(
+                    label="Transcript translated in English by Whisper",
+                    lines=4,
+                    elem_id="translated"
+                )
+                diffuse_btn = gr.Button(value="OK, Diffuse this prompt!", elem_id="diffuse_btn")
+
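+        # Right-hand column: results gallery plus model, license and bias notes.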
+        with gr.Column():
+            gr.Markdown("""
+            ## 3. Wait for Stable Diffusion Results ☕️
+            Inference time is about 5-10 minutes when it's your turn 😬
+            """)
+            sd_output = gr.Gallery().style(grid=2, height="auto")
+
+            gr.Markdown("""
+            ### 📌 About the models
+            <p style='font-size: 1em;line-height: 1.5em;'>
+            <strong>Whisper</strong> is a general-purpose speech recognition model.<br /><br />
+            It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. <br />
+            —
+            </p>
+            <p style='font-size: 1em;line-height: 1.5em;'>
+            <strong>Stable Diffusion</strong> is a state-of-the-art text-to-image model that generates images from text.
+            </p>
+            <div id="notice">
+            <div>
+            LICENSE
+            <p style='font-size: 0.8em;'>
+            The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank">CreativeML Open RAIL-M</a> license.</p>
+            <p style='font-size: 0.8em;'>
+            The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in this license.</p>
+            <p style='font-size: 0.8em;'>
+            The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information meant for harm, spreads misinformation, or targets vulnerable groups.</p>
+            <p style='font-size: 0.8em;'>
+            For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank">read the license</a>.
+            </p>
+            </div>
+            <div>
+            Biases and content acknowledgment
+            <p style='font-size: 0.8em;'>
+            As impressive as turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography, and violence.</p>
+            <p style='font-size: 0.8em;'>
+            The model was trained on the <a href="https://laion.ai/blog/laion-5b/" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes.</p>
+            <p style='font-size: 0.8em;'>You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" target="_blank">model card</a>.
+            </p>
+            </div>
+            </div>
+
+            """, elem_id="about")
+
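+    # Event wiring: the "Check Whisper" buttons run translate() only; the "Magic
+    # Whisper" buttons run the full magic_whisper_to_sd() chain; diffuse_btn
+    # re-diffuses the (possibly hand-corrected) English prompt.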
+    audio_r_translate.click(translate,
+                            inputs=record_input,
+                            outputs=[
+                                language_detected_output,
+                                transcripted_output,
+                                translated_output
+                            ])
+
+    audio_u_translate.click(translate,
+                            inputs=upload_input,
+                            outputs=[
+                                language_detected_output,
+                                transcripted_output,
+                                translated_output
+                            ])
+
+    audio_r_direct_sd.click(magic_whisper_to_sd,
+                            inputs=[
+                                record_input,
+                                guidance_scale,
+                                nb_iterations,
+                                seed
+                            ],
+                            outputs=[
+                                language_detected_output,
+                                transcripted_output,
+                                translated_output,
+                                sd_output
+                            ])
+
+    audio_u_direct_sd.click(magic_whisper_to_sd,
+                            inputs=[
+                                upload_input,
+                                guidance_scale,
+                                nb_iterations,
+                                seed
+                            ],
+                            outputs=[
+                                language_detected_output,
+                                transcripted_output,
+                                translated_output,
+                                sd_output
+                            ])
+
+    diffuse_btn.click(diffuse,
+                      inputs=[
+                          translated_output,
+                          guidance_scale,
+                          nb_iterations,
+                          seed
+                      ],
+                      outputs=sd_output
+                      )
+
+    gr.HTML('''
+        <div class="footer">
+            <p>Whisper by <a href="https://github.com/openai/whisper" target="_blank">OpenAI</a> - Stable Diffusion by <a href="https://huggingface.co/CompVis" target="_blank">CompVis</a> and <a href="https://huggingface.co/stabilityai" target="_blank">Stability AI</a>
+            </p>
+        </div>
+    ''')
+
+if __name__ == "__main__":
+    demo.launch()