import gradio as gr
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write
import spaces
import logging
import os
import uuid
import torch
from torch.cuda.amp import autocast
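# NOTE: presumably intended as a flag for the Spaces ZeroGPU torch-device patch
# (assumption; the value is not read elsewhere in this file)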
ZERO_GPU_PATCH_TORCH_DEVICE = 1
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logging.info("Loading the pretrained model.")
model = MusicGen.get_pretrained('nateraw/musicgen-songstarter-v0.2')
model.set_generation_params(duration=8)
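# Note: generation length is controlled by `duration` above (8 seconds per clip);
# longer values increase generation time.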
@spaces.GPU(duration=120)
def generate_music(description, melody_audio):
    with autocast():
        logging.info("Starting music generation.")
        if description:
            description = [description]
            if melody_audio:
                logging.info(f"Loading melody audio from: {melody_audio}")
                melody, sr = torchaudio.load(melody_audio)
                logging.info("Generating music from the description and melody.")
                # melody[None] adds the batch dimension expected by generate_with_chroma
                wav = model.generate_with_chroma(description, melody[None], sr)
            else:
                logging.info("Generating music from the description only.")
                wav = model.generate(description)
        else:
            logging.info("Generating music unconditionally.")
            wav = model.generate_unconditional(1)
        # audio_write appends the format suffix itself, so pass a stem without ".wav"
        stem = os.path.join('./', str(uuid.uuid4()))  # save the file in the current directory
        output_path = f'{stem}.wav'
        logging.info(f"Saving the generated music to: {output_path}")
        audio_write(stem, wav[0].cpu().to(torch.float32), model.sample_rate, strategy="loudness", loudness_compressor=True)

        # Check the shape of the audio tensor and that the file was saved correctly
        logging.info(f"Shape of the generated audio tensor: {wav[0].shape}")
        logging.info("Music generated and saved successfully.")
        if not os.path.exists(output_path):
            raise ValueError(f'Failed to save audio to {output_path}')
        return output_path
# Define the Gradio interface
description = gr.Textbox(label="Description", placeholder="acoustic, guitar, melody, trap, d minor, 90 bpm")
melody_audio = gr.Audio(label="Melody Audio (optional)", type="filepath")
output_path = gr.Audio(label="Generated Music", type="filepath")
gr.Interface(
    fn=generate_music,
    inputs=[description, melody_audio],
    outputs=output_path,
    title="MusicGen Demo",
    description="Generate music using the MusicGen model.",
    examples=[
        ["trap, synthesizer, songstarters, dark, G# minor, 140 bpm", "./assets/kalhonaho.mp3"],
        ["upbeat, electronic, synth, dance, 120 bpm", None]
    ]
).launch()
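# To run locally (assuming gradio, torchaudio, audiocraft, and spaces are installed):
#   python app.py
# then open the local URL that Gradio prints.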