sanchit-gandhi (HF staff) committed
Commit d03edfa
1 Parent(s): 5cda646

Swap to HF diffusers
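This commit swaps the bespoke `audioldm` package for the 🧨 Diffusers `AudioLDMPipeline`, so inference reduces to a single pipeline call. As a rough orientation before the diff, here is a minimal sketch of that call, using the same checkpoint (`cvssp/audioldm-m-full`), device logic, and default parameters as the updated `app.py`:

```python
import torch
from diffusers import AudioLDMPipeline

# fp16 on GPU, fp32 on CPU, mirroring the app's device logic
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-m-full", torch_dtype=torch_dtype).to(device)

# one 5-second waveform at 16 kHz, guided towards the prompt
# and away from the negative prompt
waveform = pipe(
    "A hammer is hitting a wooden surface",
    negative_prompt="low quality, average quality",
    audio_length_in_s=5.0,
    guidance_scale=2.5,
    generator=torch.Generator(device).manual_seed(45),
)["audios"][0]
```

On top of this, the app compiles the UNet with `torch.compile`, generates up to three candidate waveforms per prompt, and keeps the one that a CLAP model scores as the best match to the text; the exact wiring follows in the diff below.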

Files changed (1)
  1. app.py +196 -228
app.py CHANGED
@@ -1,197 +1,139 @@
  import gradio as gr
- import numpy as np
- from audioldm import text_to_audio, build_model
  from share_btn import community_icon_html, loading_icon_html, share_js

- model_id="haoheliu/AudioLDM-S-Full"

- audioldm = None
- current_model_name = None

- # def predict(input, history=[]):
- #     # tokenize the new input sentence
- #     new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

- #     # append the new user input tokens to the chat history
- #     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

- #     # generate a response
- #     history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

- #     # convert the tokens to text, and then split the responses into lines
- #     response = tokenizer.decode(history[0]).split("<|endoftext|>")
- #     response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)]  # convert to tuples of list
- #     return response, history
-
- def text2audio(text, duration, guidance_scale, random_seed, n_candidates, model_name="audioldm-m-text-ft"):
-     global audioldm, current_model_name
-
-     if audioldm is None or model_name != current_model_name:
-         audioldm = build_model(model_name=model_name)
-         current_model_name = model_name
-
-     # print(text, length, guidance_scale)
-     waveform = text_to_audio(
-         latent_diffusion=audioldm,
-         text=text,
-         seed=random_seed,
-         duration=duration,
          guidance_scale=guidance_scale,
-         n_candidate_gen_per_text=int(n_candidates),
-     )  # [bs, 1, samples]
-     waveform = [
-         gr.make_waveform((16000, wave[0]), bg_image="bg.png") for wave in waveform
-     ]
-     # waveform = [(16000, np.random.randn(16000)), (16000, np.random.randn(16000))]
-     if(len(waveform) == 1):
-         waveform = waveform[0]
-     return waveform

- # iface = gr.Interface(fn=text2audio, inputs=[
- #     gr.Textbox(value="A man is speaking in a huge room", max_lines=1),
- #     gr.Slider(2.5, 10, value=5, step=2.5),
- #     gr.Slider(0, 5, value=2.5, step=0.5),
- #     gr.Number(value=42)
- #     ], outputs=[gr.Audio(label="Output", type="numpy"), gr.Audio(label="Output", type="numpy")],
- #     allow_flagging="never"
- # )
- # iface.launch(share=True)


  css = """
  a {
-     color: inherit;
-     text-decoration: underline;
- }
- .gradio-container {
      font-family: 'IBM Plex Sans', sans-serif;
- }
- .gr-button {
-     color: white;
-     border-color: #000000;
-     background: #000000;
- }
- input[type='range'] {
      accent-color: #000000;
- }
- .dark input[type='range'] {
      accent-color: #dfdfdf;
- }
- .container {
-     max-width: 730px;
-     margin: auto;
-     padding-top: 1.5rem;
- }
- #gallery {
-     min-height: 22rem;
-     margin-bottom: 15px;
-     margin-left: auto;
-     margin-right: auto;
-     border-bottom-right-radius: .5rem !important;
-     border-bottom-left-radius: .5rem !important;
- }
- #gallery>div>.h-full {
      min-height: 20rem;
- }
- .details:hover {
      text-decoration: underline;
- }
- .gr-button {
      white-space: nowrap;
- }
- .gr-button:focus {
-     border-color: rgb(147 197 253 / var(--tw-border-opacity));
-     outline: none;
-     box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
-     --tw-border-opacity: 1;
-     --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
-     --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
-     --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
-     --tw-ring-opacity: .5;
- }
- #advanced-btn {
-     font-size: .7rem !important;
-     line-height: 19px;
-     margin-top: 12px;
-     margin-bottom: 12px;
-     padding: 2px 8px;
      border-radius: 14px !important;
- }
- #advanced-options {
      margin-bottom: 20px;
- }
- .footer {
-     margin-bottom: 45px;
-     margin-top: 35px;
-     text-align: center;
-     border-bottom: 1px solid #e5e5e5;
- }
- .footer>p {
-     font-size: .8rem;
-     display: inline-block;
-     padding: 0 10px;
-     transform: translateY(10px);
-     background: white;
- }
- .dark .footer {
      border-color: #303030;
- }
- .dark .footer>p {
      background: #0b0f19;
- }
- .acknowledgments h4{
-     margin: 1.25em 0 .25em 0;
-     font-weight: bold;
-     font-size: 115%;
- }
- #container-advanced-btns{
-     display: flex;
-     flex-wrap: wrap;
-     justify-content: space-between;
-     align-items: center;
- }
- .animate-spin {
      animation: spin 1s linear infinite;
- }
- @keyframes spin {
      from {
          transform: rotate(0deg);
-     }
-     to {
          transform: rotate(360deg);
      }
- }
- #share-btn-container {
-     display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
-     margin-top: 10px;
-     margin-left: auto;
- }
- #share-btn {
-     all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
- }
- #share-btn * {
      all: unset;
- }
- #share-btn-container div:nth-child(-n+2){
-     width: auto !important;
-     min-height: 0px !important;
- }
- #share-btn-container .wrap {
      display: none !important;
- }
- .gr-form{
      flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
- }
- #prompt-container{
      gap: 0;
- }
- #generated_id{
      min-height: 700px
- }
- #setting_id{
-     margin-bottom: 12px;
-     text-align: center;
-     font-weight: 900;
  }
  """
  iface = gr.Blocks(css=css)
@@ -202,56 +144,72 @@ with iface:
          <div style="text-align: center; max-width: 700px; margin: 0 auto;">
            <div
              style="
-               display: inline-flex;
-               align-items: center;
-               gap: 0.8rem;
-               font-size: 1.75rem;
              "
            >
              <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                AudioLDM: Text-to-Audio Generation with Latent Diffusion Models
              </h1>
-             </div>
-             <p style="margin-bottom: 10px; font-size: 94%">
-               <a href="https://arxiv.org/abs/2301.12503">[Paper]</a> <a href="https://audioldm.github.io/">[Project page]</a>
            </p>
          </div>
        """
    )
-     gr.HTML("""
-         <h1 style="font-weight: 900; margin-bottom: 7px;">
-             AudioLDM: Text-to-Audio Generation with Latent Diffusion Models
-         </h1>
-         <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
-         <br/>
-         <a href="https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation?duplicate=true">
-         <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-         <p/>
-         """)
      with gr.Group():
          with gr.Box():
-             ############# Input
-             textbox = gr.Textbox(value="A hammer is hitting a wooden surface", max_lines=1, label="Input your text here. Your text is important for the audio quality. Please ensure it is descriptive by using more adjectives.", elem_id="prompt-in")

          with gr.Accordion("Click to modify detailed configurations", open=False):
-             seed = gr.Number(value=45, label="Change this value (any integer number) will lead to a different generation result.")
-             duration = gr.Slider(2.5, 10, value=5, step=2.5, label="Duration (seconds)")
-             guidance_scale = gr.Slider(0, 4, value=2.5, step=0.5, label="Guidance scale (Large => better quality and relavancy to text; Small => better diversity)")
-             n_candidates = gr.Slider(1, 3, value=3, step=1, label="Automatic quality control. This number control the number of candidates (e.g., generate three audios and choose the best to show you). A Larger value usually lead to better quality with heavier computation")
-             # model_name = gr.Dropdown(
-             #     ["audioldm-m-text-ft", "audioldm-s-text-ft", "audioldm-m-full", "audioldm-s-full-v2", "audioldm-s-full", "audioldm-l-full"], value="audioldm-m-full", label="Choose the model to use. audioldm-m-text-ft and audioldm-s-text-ft are recommanded. -s- means small, -m- means medium and -l- means large",
-             # )
-             ############# Output
-             # outputs=gr.Audio(label="Output", type="numpy")
-             outputs=gr.Video(label="Output", elem_id="output-video")
-
-             # with gr.Group(elem_id="container-advanced-btns"):
-             #     # advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
-             #     with gr.Group(elem_id="share-btn-container"):
-             #         community_icon = gr.HTML(community_icon_html, visible=False)
-             #         loading_icon = gr.HTML(loading_icon_html, visible=False)
-             #         share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
-             # outputs=[gr.Audio(label="Output", type="numpy"), gr.Audio(label="Output", type="numpy")]
          btn = gr.Button("Submit").style(full_width=True)

      with gr.Group(elem_id="share-btn-container", visible=False):
@@ -259,51 +217,61 @@ with iface:
          loading_icon = gr.HTML(loading_icon_html)
          share_button = gr.Button("Share to community", elem_id="share-btn")

-     # btn.click(text2audio, inputs=[
-     #     textbox, duration, guidance_scale, seed, n_candidates, model_name], outputs=[outputs])
-     btn.click(text2audio, inputs=[
-         textbox, duration, guidance_scale, seed, n_candidates], outputs=[outputs])
-
      share_button.click(None, [], [], _js=share_js)
-     gr.HTML('''
          <div class="footer" style="text-align: center; max-width: 700px; margin: 0 auto;">
-             <p>Follow the latest update of AudioLDM on our<a href="https://github.com/haoheliu/AudioLDM" style="text-decoration: underline;" target="_blank"> Github repo</a>
-             </p>
-             <br>
-             <p>Model by <a href="https://twitter.com/LiuHaohe" style="text-decoration: underline;" target="_blank">Haohe Liu</a></p>
-             <br>
          </div>
-     ''')
-     gr.Examples([
-         ["A hammer is hitting a wooden surface", 5, 2.5, 45, 3, "audioldm-m-full"],
-         ["Peaceful and calming ambient music with singing bowl and other instruments.", 5, 2.5, 45, 3, "audioldm-m-full"],
-         ["A man is speaking in a small room.", 5, 2.5, 45, 3, "audioldm-m-full"],
-         ["A female is speaking followed by footstep sound", 5, 2.5, 45, 3, "audioldm-m-full"],
-         ["Wooden table tapping sound followed by water pouring sound.", 5, 2.5, 45, 3, "audioldm-m-full"],
-         ],
          fn=text2audio,
-         # inputs=[textbox, duration, guidance_scale, seed, n_candidates, model_name],
-         inputs=[textbox, duration, guidance_scale, seed, n_candidates],
          outputs=[outputs],
          cache_examples=True,
      )
-     gr.HTML('''
-         <div class="acknowledgements">
-             <p>Essential Tricks for Enhancing the Quality of Your Generated Audio</p>
-             <p>1. Try to use more adjectives to describe your sound. For example: "A man is speaking clearly and slowly in a large room" is better than "A man is speaking". This can make sure AudioLDM understands what you want.</p>
-             <p>2. Try to use different random seeds, which can affect the generation quality significantly sometimes.</p>
-             <p>3. It's better to use general terms like 'man' or 'woman' instead of specific names for individuals or abstract objects that humans may not be familiar with, such as 'mummy'.</p>
-         </div>
-     ''')
      with gr.Accordion("Additional information", open=False):
          gr.HTML(
              """
              <div class="acknowledgments">
-                 <p> We build the model with data from <a href="http://research.google.com/audioset/">AudioSet</a>, <a href="https://freesound.org/">Freesound</a> and <a href="https://sound-effects.bbcrewind.co.uk/">BBC Sound Effect library</a>. We share this demo based on the <a href="https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/375954/Research.pdf">UK copyright exception</a> of data for academic research. </p>
              </div>
              """
          )
      # <p>This demo is strictly for research demo purpose only. For commercial use please <a href="haoheliu@gmail.com">contact us</a>.</p>

  iface.queue(max_size=10).launch(debug=True)
- # iface.launch(debug=True, share=True)

  import gradio as gr
+ import torch
+ from diffusers import AudioLDMPipeline
  from share_btn import community_icon_html, loading_icon_html, share_js

+ from transformers import AutoProcessor, ClapModel


+ # make Space compatible with CPU duplicates
+ if torch.cuda.is_available():
+     device = "cuda"
+     torch_dtype = torch.float16
+ else:
+     device = "cpu"
+     torch_dtype = torch.float32

+ # load the diffusers pipeline
+ repo_id = "cvssp/audioldm-m-full"
+ pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype).to(device)
+ pipe.unet = torch.compile(pipe.unet)

+ # CLAP model (only required for automatic scoring)
+ clap_model = ClapModel.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-full").to(device)
+ processor = AutoProcessor.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-full")

+ generator = torch.Generator(device)
+
+
+ def text2audio(text, negative_prompt, duration, guidance_scale, random_seed, n_candidates):
+     if text is None:
+         raise gr.Error("Please provide a text input.")
+
+     waveforms = pipe(
+         text,
+         audio_length_in_s=duration,
          guidance_scale=guidance_scale,
+         negative_prompt=negative_prompt,
+         num_waveforms_per_prompt=n_candidates if n_candidates else 1,
+         generator=generator.manual_seed(int(random_seed)),
+     )["audios"]
+
+     if waveforms.shape[0] > 1:
+         waveform = score_waveforms(text, waveforms)
+     else:
+         waveform = waveforms[0]
+
+     return gr.make_waveform((16000, waveform), bg_image="bg.png")
+

+ def score_waveforms(text, waveforms):
+     inputs = processor(text=text, audios=list(waveforms), return_tensors="pt", padding=True)
+     inputs = {key: inputs[key].to(device) for key in inputs}
+     with torch.no_grad():
+         logits_per_text = clap_model(**inputs).logits_per_text  # audio-text similarity scores
+         probs = logits_per_text.softmax(dim=-1)  # softmax over the candidate waveforms
+         most_probable = torch.argmax(probs)  # select the waveform that best matches the prompt
+     waveform = waveforms[most_probable]
+     return waveform


  css = """
  a {
+     color: inherit; text-decoration: underline;
+ } .gradio-container {
      font-family: 'IBM Plex Sans', sans-serif;
+ } .gr-button {
+     color: white; border-color: #000000; background: #000000;
+ } input[type='range'] {
      accent-color: #000000;
+ } .dark input[type='range'] {
      accent-color: #dfdfdf;
+ } .container {
+     max-width: 730px; margin: auto; padding-top: 1.5rem;
+ } #gallery {
+     min-height: 22rem; margin-bottom: 15px; margin-left: auto; margin-right: auto; border-bottom-right-radius:
+     .5rem !important; border-bottom-left-radius: .5rem !important;
+ } #gallery>div>.h-full {
      min-height: 20rem;
+ } .details:hover {
      text-decoration: underline;
+ } .gr-button {
      white-space: nowrap;
+ } .gr-button:focus {
+     border-color: rgb(147 197 253 / var(--tw-border-opacity)); outline: none; box-shadow:
+     var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); --tw-border-opacity: 1;
+     --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width)
+     var(--tw-ring-offset-color); --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px
+     var(--tw-ring-offset-width)) var(--tw-ring-color); --tw-ring-color: rgb(191 219 254 /
+     var(--tw-ring-opacity)); --tw-ring-opacity: .5;
+ } #advanced-btn {
+     font-size: .7rem !important; line-height: 19px; margin-top: 12px; margin-bottom: 12px; padding: 2px 8px;
      border-radius: 14px !important;
+ } #advanced-options {
      margin-bottom: 20px;
+ } .footer {
+     margin-bottom: 45px; margin-top: 35px; text-align: center; border-bottom: 1px solid #e5e5e5;
+ } .footer>p {
+     font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white;
+ } .dark .footer {
      border-color: #303030;
+ } .dark .footer>p {
      background: #0b0f19;
+ } .acknowledgments h4{
+     margin: 1.25em 0 .25em 0; font-weight: bold; font-size: 115%;
+ } #container-advanced-btns{
+     display: flex; flex-wrap: wrap; justify-content: space-between; align-items: center;
+ } .animate-spin {
      animation: spin 1s linear infinite;
+ } @keyframes spin {
      from {
          transform: rotate(0deg);
+     } to {
          transform: rotate(360deg);
      }
+ } #share-btn-container {
+     display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color:
+     #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+     margin-top: 10px; margin-left: auto;
+ } #share-btn {
+     all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif;
+     margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem
+     !important;right:0;
+ } #share-btn * {
      all: unset;
+ } #share-btn-container div:nth-child(-n+2){
+     width: auto !important; min-height: 0px !important;
+ } #share-btn-container .wrap {
      display: none !important;
+ } .gr-form{
      flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
+ } #prompt-container{
      gap: 0;
+ } #generated_id{
      min-height: 700px
+ } #setting_id{
+     margin-bottom: 12px; text-align: center; font-weight: 900;
  }
  """
  iface = gr.Blocks(css=css)
 
          <div style="text-align: center; max-width: 700px; margin: 0 auto;">
            <div
              style="
+               display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
              "
            >
              <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                AudioLDM: Text-to-Audio Generation with Latent Diffusion Models
              </h1>
+             </div> <p style="margin-bottom: 10px; font-size: 94%">
+               <a href="https://arxiv.org/abs/2301.12503">[Paper]</a> <a href="https://audioldm.github.io/">[Project
+               page]</a> <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm">[🧨
+               Diffusers]</a>
            </p>
          </div>
        """
    )
+     gr.HTML(
+         """
+         <p>This is the demo for AudioLDM, powered by 🧨 Diffusers. The demo uses the checkpoint <a
+         href="https://huggingface.co/cvssp/audioldm-m-full">audioldm-m-full</a>. For faster inference without
+         waiting in the queue, you may duplicate the space and upgrade to a GPU in the settings. <br/> <a
+         href="https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation?duplicate=true"> <img
+         style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> </p>
+         """
+     )
+
      with gr.Group():
          with gr.Box():
+             textbox = gr.Textbox(
+                 value="A hammer is hitting a wooden surface",
+                 max_lines=1,
+                 label="Input text",
+                 info="Your text is important for the audio quality. Please ensure it is descriptive by using more adjectives.",
+                 elem_id="prompt-in",
+             )
+             negative_textbox = gr.Textbox(
+                 value="low quality, average quality",
+                 max_lines=1,
+                 label="Negative prompt",
+                 info="Describe what the generated audio should avoid. Choosing an appropriate negative prompt can improve the audio quality significantly.",
+                 elem_id="prompt-in",
+             )

          with gr.Accordion("Click to modify detailed configurations", open=False):
+             seed = gr.Number(
+                 value=45,
+                 label="Seed",
+                 info="Changing this value (any integer number) will lead to a different generation result.",
+             )
+             duration = gr.Slider(2.5, 10, value=5, step=2.5, label="Duration (seconds)")
+             guidance_scale = gr.Slider(
+                 0,
+                 4,
+                 value=2.5,
+                 step=0.5,
+                 label="Guidance scale",
+                 info="Large => better quality and relevancy to text; Small => better diversity",
+             )
+             n_candidates = gr.Slider(
+                 1,
+                 3,
+                 value=3,
+                 step=1,
+                 label="Number of waveforms to generate",
+                 info="Automatic quality control: generate several candidate audios and show you the best one. A larger number usually leads to better quality at the cost of heavier computation.",
+             )
+
+         outputs = gr.Video(label="Output", elem_id="output-video")
          btn = gr.Button("Submit").style(full_width=True)

      with gr.Group(elem_id="share-btn-container", visible=False):
 
          loading_icon = gr.HTML(loading_icon_html)
          share_button = gr.Button("Share to community", elem_id="share-btn")

+     btn.click(
+         text2audio,
+         inputs=[textbox, negative_textbox, duration, guidance_scale, seed, n_candidates],
+         outputs=[outputs],
+     )
+
      share_button.click(None, [], [], _js=share_js)
+     gr.HTML(
+         """
          <div class="footer" style="text-align: center; max-width: 700px; margin: 0 auto;">
+             <p>Follow the latest updates of AudioLDM on our <a href="https://github.com/haoheliu/AudioLDM"
+             style="text-decoration: underline;" target="_blank">GitHub repo</a>.</p> <br> <p>Model by <a
+             href="https://twitter.com/LiuHaohe" style="text-decoration: underline;" target="_blank">Haohe
+             Liu</a>. Code and demo by 🤗 Hugging Face.</p> <br>
          </div>
+         """
+     )
+     gr.Examples(
+         [
+             ["A hammer is hitting a wooden surface", "low quality, average quality", 5, 2.5, 45, 3],
+             ["Peaceful and calming ambient music with singing bowl and other instruments.", "low quality, average quality", 5, 2.5, 45, 3],
+             ["A man is speaking in a small room.", "low quality, average quality", 5, 2.5, 45, 3],
+             ["A female is speaking followed by footstep sound", "low quality, average quality", 5, 2.5, 45, 3],
+             ["Wooden table tapping sound followed by water pouring sound.", "low quality, average quality", 5, 2.5, 45, 3],
+         ],
          fn=text2audio,
+         inputs=[textbox, negative_textbox, duration, guidance_scale, seed, n_candidates],
          outputs=[outputs],
          cache_examples=True,
      )
+     gr.HTML(
+         """
+         <div class="acknowledgements"> <p>Essential Tricks for Enhancing the Quality of Your Generated
+         Audio</p> <p>1. Try to use more adjectives to describe your sound. For example: "A man is speaking
+         clearly and slowly in a large room" is better than "A man is speaking". This helps make sure AudioLDM
+         understands what you want.</p> <p>2. Try different random seeds; they can sometimes affect the
+         generation quality significantly.</p> <p>3. It's better to use general terms like 'man' or 'woman'
+         instead of specific names of individuals, or abstract objects that humans may not be familiar with,
+         such as 'mummy'.</p> <p>4. Using a negative prompt to tell the diffusion process what to avoid can
+         improve the audio quality significantly. Try negative prompts like 'low quality'.</p> </div>
+         """
+     )
      with gr.Accordion("Additional information", open=False):
          gr.HTML(
              """
              <div class="acknowledgments">
+                 <p> We build the model with data from <a href="http://research.google.com/audioset/">AudioSet</a>,
+                 <a href="https://freesound.org/">Freesound</a> and <a
+                 href="https://sound-effects.bbcrewind.co.uk/">BBC Sound Effect library</a>. We share this demo
+                 based on the <a
+                 href="https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/375954/Research.pdf">UK
+                 copyright exception</a> of data for academic research. </p>
              </div>
              """
          )
      # <p>This demo is strictly for research demo purpose only. For commercial use please <a href="haoheliu@gmail.com">contact us</a>.</p>

  iface.queue(max_size=10).launch(debug=True)
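The CLAP-based candidate ranking that the new `score_waveforms` performs also works standalone. A minimal sketch, assuming `waveforms` is the batch of NumPy waveforms returned by the pipeline; the helper name `best_waveform` is ours, and the softmax from the app is dropped since it does not change the argmax:

```python
import torch
from transformers import AutoProcessor, ClapModel

# same CLAP checkpoint the app uses for automatic quality control
clap_model = ClapModel.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-full")
processor = AutoProcessor.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-full")


def best_waveform(text, waveforms):
    # embed the prompt and every candidate waveform, then score text-audio similarity
    inputs = processor(text=text, audios=list(waveforms), return_tensors="pt", padding=True)
    with torch.no_grad():
        logits_per_text = clap_model(**inputs).logits_per_text
    # keep the candidate with the highest similarity to the prompt
    return waveforms[logits_per_text.argmax()]
```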