import torch
import torchaudio
import gradio as gr
import os
from demucs.pretrained import get_model
from demucs.apply import apply_model

# Load the pretrained mdx_extra model
model = get_model('mdx_extra')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

output_dir = "separated_mdx_extra"
os.makedirs(output_dir, exist_ok=True)
def separate_audio(audio_path):
    wav, sr = torchaudio.load(audio_path)
    # Match the model's expected sample rate and channel count (stereo).
    if sr != model.samplerate:
        wav = torchaudio.functional.resample(wav, sr, model.samplerate)
    if wav.shape[0] == 1:
        wav = wav.repeat(2, 1)
    wav = wav.to(device)
    # apply_model returns (batch, stems, channels, samples).
    sources = apply_model(model, wav[None], device=device)[0]
    # mdx_extra outputs drums, bass, other and vocals; the instrumental is
    # everything except the vocal stem.
    vocals = sources[model.sources.index("vocals")]
    instrumental = sources.sum(dim=0) - vocals
    vocals_path = os.path.join(output_dir, "vocals.wav")
    instrumental_path = os.path.join(output_dir, "instrumental.wav")
    torchaudio.save(vocals_path, vocals.cpu(), model.samplerate)
    torchaudio.save(instrumental_path, instrumental.cpu(), model.samplerate)
    return vocals_path, instrumental_path
# Gradio interface
interface = gr.Interface(
    fn=separate_audio,
    inputs=gr.Audio(type="filepath"),
    outputs=[gr.Audio(label="Vocals"), gr.Audio(label="Instrumental")],
    title="AI Music Separator (Demucs)",
    description="Upload a song, and AI will separate vocals and instrumental.",
)

if __name__ == "__main__":
    interface.launch()
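
For a quick local check without launching the Gradio UI, the separation function can be called directly. This is only a sketch: it assumes the file above is saved as app.py (the usual Hugging Face Spaces entry point) and that "song.mp3" is replaced by a real audio file on disk; the Space also needs demucs, torch, torchaudio, and gradio installed (e.g. via requirements.txt).

# Local sanity check; "app" refers to the file above saved as app.py,
# and "song.mp3" is a placeholder input path.
from app import separate_audio

vocals_path, instrumental_path = separate_audio("song.mp3")
print("Vocals:", vocals_path)
print("Instrumental:", instrumental_path)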