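# Gradio demo: text- and melody-conditioned music generation with the
# nateraw/musicgen-songstarter-v0.2 checkpoint (audiocraft MusicGen).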
import gradio as gr
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write


def generate_music(description, melody_audio):
    # Note: the model is reloaded on every call; caching it at module level
    # would speed up repeated requests.
    model = MusicGen.get_pretrained('nateraw/musicgen-songstarter-v0.2')
    model.set_generation_params(duration=8)  # generate 8 seconds of audio

    if description:
        descriptions = [description] * 3
        if melody_audio:
            # Condition generation on the text prompt and the melody's chroma.
            melody, sr = torchaudio.load(melody_audio)
            wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr)
        else:
            wav = model.generate(descriptions)  # generates 3 samples
    else:
        wav = model.generate_unconditional(4)  # generates 4 unconditional audio samples

    output_audios = []
    for idx, one_wav in enumerate(wav):
        # audio_write appends the ".wav" suffix and returns the final path;
        # the "loudness" strategy normalizes to -14 dB LUFS by default.
        output_path = audio_write(f'output_{idx}', one_wav.cpu(), model.sample_rate,
                                  strategy="loudness", loudness_compressor=True)
        output_audios.append(str(output_path))

    # The interface exposes a single audio output, so return the first sample.
    return output_audios[0]
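
# Gradio UI: a text prompt, an optional melody file for conditioning,
# and a single generated-audio output.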
description = gr.Textbox(label="Description", placeholder="acoustic, guitar, melody, trap, d minor, 90 bpm")
melody_audio = gr.Audio(label="Melody Audio (optional)", type="filepath")
output_audio = gr.Audio(label="Generated Music", type="filepath")
gr.Interface(
    fn=generate_music,
    inputs=[description, melody_audio],
    outputs=output_audio,
    title="MusicGen Demo",
    description="Generate music using the MusicGen model.",
    examples=[
        ["trap, synthesizer, songstarters, dark, G# minor, 140 bpm", "./assets/kalhonaho.mp3"],
        ["upbeat, electronic, synth, dance, 120 bpm", None]
    ]
).launch()