# open-unmix / app.py
import torch
import stempeg
from openunmix import predict
import gradio as gr

# Fetch a short example clip used by the Gradio examples list.
torch.hub.download_url_to_file(
    'https://github.com/AK391/open-unmix-pytorch/blob/master/test.wav?raw=true',
    'test.wav',
)
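# Use the GPU when available; the device is passed to predict.separate below.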
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def inference(audio):
    # Read the first 7 seconds of the uploaded file at 44.1 kHz.
    start = 0
    stop = 7
    audio, rate = stempeg.read_stems(
        audio.name,
        sample_rate=44100,
        start=start,
        duration=stop - start,
    )
    # Separate the mixture into its stems with the pretrained Open-Unmix model.
    estimates = predict.separate(
        audio=torch.as_tensor(audio).float(),
        rate=44100,
        device=device,
    )
    # Convert each estimate to a (samples, channels) numpy array for writing.
    estimates_numpy = {}
    for target, estimate in estimates.items():
        estimates_numpy[target] = torch.squeeze(estimate).detach().cpu().numpy().T
    # FilesWriter writes one file per stem next to target_path,
    # named after the dict keys (vocals.wav, drums.wav, bass.wav, other.wav).
    target_path = "target.wav"
    stempeg.write_stems(
        target_path,
        estimates_numpy,
        sample_rate=rate,
        writer=stempeg.FilesWriter(multiprocess=True, output_sample_rate=44100),
    )
    return 'vocals.wav', 'drums.wav', 'bass.wav', 'other.wav'
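# The gr.inputs / gr.outputs classes below are the Gradio 2.x API; with
# type="file", the callback receives a temp file object, hence audio.name above.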
inputs = gr.inputs.Audio(label="Input Audio", type="file")
outputs = [
    gr.outputs.Audio(label="Vocals", type="file"),
    gr.outputs.Audio(label="Drums", type="file"),
    gr.outputs.Audio(label="Bass", type="file"),
    gr.outputs.Audio(label="Other", type="file"),
]
title = "OPEN-UNMIX"
description = "Gradio demo for Open-Unmix, a reference implementation for music source separation. To use it, simply upload your audio, or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://joss.theoj.org/papers/10.21105/joss.01667'>Open-Unmix - A Reference Implementation for Music Source Separation</a> | <a href='https://github.com/sigsep/open-unmix-pytorch'>Github Repo</a></p>"
examples = [['test.wav']]
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()
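# Optional local sanity check (a sketch, not part of the hosted demo): call the
# separation directly on the bundled test clip. Uncomment and run manually after
# disabling the launch() call above; names here are for illustration only.
#
# if __name__ == "__main__":
#     clip, sr = stempeg.read_stems("test.wav", sample_rate=44100, duration=7.0)
#     stems = predict.separate(audio=torch.as_tensor(clip).float(), rate=44100, device=device)
#     print({name: tuple(est.shape) for name, est in stems.items()})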