Space: Fabrice-TIERCELIN/Text-to-Music (Running)
Commit: ac60278, committed by Fabrice-TIERCELIN
Parent(s): 9ffd6b8
Files changed: demos/musicgen_app.py (+19, -6)

demos/musicgen_app.py (CHANGED)
@@ -178,7 +178,7 @@ def predict_batched(texts, melodies):
     return res
 
 
-def predict_full(model, model_path, decoder, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
+def predict_full(model, model_path, decoder, text, melody, duration, topk, topp, temperature, cfg_coef, output_hint, progress=gr.Progress()):
     global INTERRUPTING
     global USE_DIFFUSION
     INTERRUPTING = False
@@ -228,12 +228,19 @@ def predict_full(model, model_path, decoder, text, melody, duration, topk, topp,
 
 def toggle_audio_src(choice):
     if choice == "mic":
-        return gr.update(source="microphone", value=None, label="Microphone")
+        return gr.update(source = "microphone", value = None, label = "Microphone")
     else:
-        return gr.update(source="upload", value=None, label="File")
+        return gr.update(source = "upload", value = None, label = "File")
 
 
 def toggle_diffusion(choice):
+    if choice == "MultiBand_Diffusion":
+        return [gr.update(visible = True)] * 2
+    else:
+        return [gr.update(visible = False)] * 2
+
+
+def toggle_hint(choice):
     if choice == "MultiBand_Diffusion":
         return [gr.update(visible=True)] * 2
     else:
@@ -252,13 +259,18 @@ def ui_full(launch_kwargs):
                 <p style="text-align: center;">Generates up to 2 minutes of music freely, without account and without watermark that you can download</p>
                 <br/>
                 <br/>
-
+                Powered by <a href="https://github.com/facebookresearch/audiocraft">MusicGen</a>,
                 presented at: <a href="https://huggingface.co/papers/2306.05284">"Simple and Controllable Music Generation"</a>.
                 MusicGen gets better results than other AIs like WaveFormer.
                 If you are looking for sound effect rather than music, I recommend you AudioGen or AudioLDM2.
                 The generated tracks tend to be very monotone so I advise you to add an original track to force the AI to make variations.
+                <br/>
+                🐌 Slow process... ~6 hours for 2 minutes of music.
+                I advise you to use the identical <a href='https://huggingface.co/spaces/tomandandy/MusicGen5'>tomandandy's space</a> that runs faster on A10G.
                 You can duplicate this space on a free account, it works on CPU.
                 <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/Text-to-Music?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14"></a>
+                <br/>
+                🄯 If you use the standard models, you can use, modify and share the generated musics but not for commercial uses.
 
             """
         )
@@ -302,9 +314,10 @@ def ui_full(launch_kwargs):
                 diffusion_output = gr.Video(label="MultiBand Diffusion Decoder")
                 audio_diffusion = gr.Audio(label="MultiBand Diffusion Decoder (wav)", type='filepath')
         submit.click(toggle_diffusion, decoder, [diffusion_output, audio_diffusion], queue=False,
+                     show_progress=False).then(toggle_hint, decoder, [output_hint, audio_diffusion], queue=False,
                      show_progress=False).then(predict_full, inputs=[model, model_path, decoder, text, melody, duration, topk, topp,
-                                                                     temperature, cfg_coef],
-                                               outputs=[output, audio_output, diffusion_output, audio_diffusion])
+                                                                     temperature, cfg_coef, output_hint],
+                                               outputs=[output, audio_output, diffusion_output, audio_diffusion], scroll_to_output = True)
         radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
 
         gr.Examples(
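For readers unfamiliar with Gradio event chaining, below is a minimal, self-contained sketch of the wiring pattern this commit introduces: a click first runs a fast, unqueued visibility toggle (like the new toggle_hint), then chains into the slow generation step with .then() and scrolls to its output. It targets the Gradio 3 API that the diff itself uses (gr.update, show_progress=False); the component names and the fake_predict stand-in are illustrative placeholders, not the Space's actual code.

import gradio as gr

def toggle_hint(choice):
    # Show the two hint/diffusion components only when MultiBand_Diffusion is chosen.
    if choice == "MultiBand_Diffusion":
        return [gr.update(visible=True)] * 2
    else:
        return [gr.update(visible=False)] * 2

def fake_predict(decoder, text):
    # Placeholder for predict_full: the real function generates audio with MusicGen.
    return f"Would generate music for {text!r} using decoder {decoder!r}"

with gr.Blocks() as demo:
    decoder = gr.Radio(["Default", "MultiBand_Diffusion"], value="Default", label="Decoder")
    text = gr.Text(label="Prompt")
    submit = gr.Button("Generate")
    output_hint = gr.Markdown("Generation can take a long time...", visible=False)
    diffusion_output = gr.Textbox(label="MultiBand Diffusion output", visible=False)
    result = gr.Textbox(label="Result")

    # The commit's pattern: toggle visibility first (fast, unqueued), then run the
    # heavy prediction in a chained step and scroll the page to its output.
    submit.click(toggle_hint, decoder, [output_hint, diffusion_output], queue=False,
                 show_progress=False).then(fake_predict, inputs=[decoder, text],
                                           outputs=[result], scroll_to_output=True)

demo.launch()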