"""
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.

This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""

import os
from tempfile import NamedTemporaryFile

import gradio as gr
import torch
from scipy.io.wavfile import write

from audiocraft.data.audio import audio_write
from audiocraft.models import MusicGen


# MusicGen model cache: loaded lazily and kept at module level so that
# switching versions only triggers a reload when actually needed.
MODEL = None


def split_process(audio, chosen_out_track):
    os.makedirs("out", exist_ok=True)
    # Gradio's numpy audio component yields a (sample_rate, data) tuple.
    write('test.wav', audio[0], audio[1])
    # Separate the uploaded file into stems with the quantized MDX model, on CPU.
    os.system("python3 -m demucs.separate -n mdx_extra_q -d cpu test.wav -o out")
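
    # A sketch of a sturdier invocation, assuming the demucs CLI accepts the
    # same flags when run through subprocess (untested here):
    #   subprocess.run(
    #       ["python3", "-m", "demucs.separate", "-n", "mdx_extra_q",
    #        "-d", "cpu", "test.wav", "-o", "out"],
    #       check=True,
    #   )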

    # Demucs writes each stem to out/<model>/<input-name>/<stem>.wav;
    # "all-in" returns the untouched mix.
    stems = {
        "vocals": "./out/mdx_extra_q/test/vocals.wav",
        "bass": "./out/mdx_extra_q/test/bass.wav",
        "drums": "./out/mdx_extra_q/test/drums.wav",
        "other": "./out/mdx_extra_q/test/other.wav",
        "all-in": "test.wav",
    }
    return stems[chosen_out_track]


def load_model(version):
    print("Loading model", version)
    return MusicGen.get_pretrained(version)


def predict(music_prompt, melody, duration, model):
    text = music_prompt
    global MODEL
    # Reload only when the requested checkpoint differs from the cached one.
    if MODEL is None or MODEL.name != model:
        MODEL = load_model(model)

    if duration > MODEL.lm.cfg.dataset.segment_duration:
        raise gr.Error("MusicGen currently supports durations of up to 30 seconds!")
    MODEL.set_generation_params(
        use_sampling=True,
        top_k=250,
        top_p=0,
        temperature=1.0,
        cfg_coef=3.0,
        duration=duration,
    )
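
    # top_k sampling is used with top_p disabled (0); cfg_coef sets the
    # strength of classifier-free guidance toward the text description.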

    if melody:
        # Gradio provides (sample_rate, np.ndarray); move the data to the
        # model device as a float tensor and prepend a batch dimension.
        sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0)
        print(melody.shape)
        if melody.dim() == 2:
            # A 2-D tensor means mono input: add the missing channel axis.
            melody = melody[None]
        # Trim the conditioning audio to the model's maximum segment length.
        melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)]
        output = MODEL.generate_with_chroma(
            descriptions=[text],
            melody_wavs=melody,
            melody_sample_rate=sr,
            progress=False,
        )
    else:
        output = MODEL.generate(descriptions=[text], progress=False)

    output = output.detach().cpu().float()[0]
    # Write to a persistent temp file (delete=False) so Gradio can serve it;
    # strategy="loudness" normalizes the output level.
    with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
        audio_write(file.name, output, MODEL.sample_rate, strategy="loudness", add_suffix=False)
    return file.name
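
# A sketch of driving the pipeline without the UI (hypothetical prompt and
# settings; assumes the chosen checkpoint can be fetched on first use):
#   wav_path = predict("lofi slow bpm electro chill", None, 10, "small")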


css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""


with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            # Split Audio Tracks to MusicGen
            Upload an audio file, split it into stems with Demucs, choose a stem as the conditioning sound for MusicGen, and get a remix!
            ***Careful: the MusicGen model loaded here can only handle up to 30 seconds of audio. Please use the editing feature of the Gradio audio component to trim your input before conditioning.***
            <br/>
            [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg)](https://huggingface.co/spaces/fffiloni/SplitTrack2MusicGen?duplicate=true)
            for longer audio, more control and no queue.
            """
        )

        with gr.Column():
            uploaded_sound = gr.Audio(type="numpy", label="Input", source="upload")
            with gr.Row():
                chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", info="Which track from your audio do you want to mash up?", value="vocals")
                load_sound_btn = gr.Button('Load your chosen track')

        with gr.Row():
            music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True, placeholder="lofi slow bpm electro chill with organic samples")
            melody = gr.Audio(source="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
        with gr.Row():
            model = gr.Radio(["melody", "medium", "small", "large"], label="MusicGen Model", value="melody", interactive=True)
            duration = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Generated Music Duration", interactive=True)
        with gr.Row():
            submit = gr.Button("Submit")

        output = gr.Audio(label="Generated Music")

        gr.Examples(
            fn=predict,
            examples=[
                [
                    "An 80s driving pop song with heavy drums and synth pads in the background",
                    None,
                    10,
                    "melody"
                ],
                [
                    "A cheerful country song with acoustic guitars",
                    None,
                    10,
                    "melody"
                ],
                [
                    "90s rock song with electric guitar and heavy drums",
                    None,
                    10,
                    "melody"
                ],
                [
                    "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
                    None,
                    10,
                    "melody"
                ],
                [
                    "lofi slow bpm electro chill with organic samples",
                    None,
                    10,
                    "melody"
                ],
            ],
            inputs=[music_prompt, melody, duration, model],
            outputs=[output]
        )
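
    # Event wiring: the Demucs split step fills the melody component, which
    # predict then consumes along with the prompt, duration, and model choice.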

    load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody])
    submit.click(predict, inputs=[music_prompt, melody, duration, model], outputs=[output])

demo.queue(max_size=32).launch()