import torch
import gradio as gr
import stempeg

# Fetch the example clip referenced in the Gradio examples below.
torch.hub.download_url_to_file('https://github.com/AK391/open-unmix-pytorch/blob/master/test.wav?raw=true', 'test.wav')

# Run on GPU when available.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# Load the pretrained umxhq separator from torch.hub and move it to the
# selected device (the original defined `device` but never used it).
separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxhq')
separator.to(device)
separator.eval()
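# 'umxhq' is the variant trained on the uncompressed MUSDB18-HQ dataset; the
# hub repo also exposes other pretrained variants (e.g. 'umx', trained on the
# compressed MUSDB18) that could be swapped in here.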


def inference(audio):
    # Decode the uploaded file, resampled to the 44.1 kHz rate the model expects.
    audio, rate = stempeg.read_stems(
        audio,
        sample_rate=44100,
    )
    # stempeg returns (samples, channels); the separator expects a
    # (batch, channels, samples) float tensor.
    audio = torch.as_tensor(audio).float().T
    audio = audio[None].to(device)
    estimates = separator(audio)
    # Convert the stacked estimates tensor into a {target: tensor} dict.
    estimates = separator.to_dict(estimates)

    # Move each estimate back to (samples, channels) on the CPU for writing.
    estimates_numpy = {}
    for target, estimate in estimates.items():
        estimates_numpy[target] = torch.squeeze(estimate).detach().cpu().numpy().T

    # FilesWriter ignores the basename of target_path and writes one file per
    # target key (vocals.wav, drums.wav, bass.wav, other.wav) alongside it.
    target_path = "target.wav"
    stempeg.write_stems(
        target_path,
        estimates_numpy,
        sample_rate=rate,
        writer=stempeg.FilesWriter(multiprocess=True, output_sample_rate=44100),
    )
    return 'vocals.wav', 'drums.wav', 'bass.wav', 'other.wav'
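
# Hypothetical local smoke test (not part of the app): calling the function
# directly on the downloaded example should write the four stems to disk.
#
#   stems = inference('test.wav')
#   print(stems)  # ('vocals.wav', 'drums.wav', 'bass.wav', 'other.wav')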

# Gradio 2.x-style input/output components.
inputs = gr.inputs.Audio(label="Input Audio", type="filepath")
outputs = [
    gr.outputs.Audio(label="Vocals", type="file"),
    gr.outputs.Audio(label="Drums", type="file"),
    gr.outputs.Audio(label="Bass", type="file"),
    gr.outputs.Audio(label="Other", type="file"),
]
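# Note: the order of `outputs` must match the tuple returned by `inference`.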

title = "OPEN-UNMIX"
description = "Gradio demo for OPEN-UNMIX, a reference implementation for music source separation. To use it, upload your own audio or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://joss.theoj.org/papers/10.21105/joss.01667'>Open-Unmix - A Reference Implementation for Music Source Separation</a> | <a href='https://github.com/sigsep/open-unmix-pytorch'>Github Repo</a></p>"

examples = [['test.wav']]

gr.Interface(
    inference,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True)