import torch
import gradio as gr
import stempeg

# download an example audio clip for the demo
torch.hub.download_url_to_file('https://github.com/AK391/open-unmix-pytorch/blob/master/test.wav?raw=true', 'test.wav')

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# load the pre-trained umxhq four-target separator (vocals, drums, bass, other)
separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxhq')
separator.to(device)

def inference(audio):
    # `audio` is a file path (gr.inputs.Audio type="filepath");
    # read it and resample to the 44.1 kHz rate the model expects
    audio, rate = stempeg.read_stems(
        audio,
        sample_rate=44100
    )
    # stempeg returns (samples, channels); the separator expects
    # (batch, channels, samples)
    audio = torch.as_tensor(audio).float().T
    audio = audio[None].to(device)
    estimates = separator(audio)
    estimates = separator.to_dict(estimates)

    # convert each estimated target back to a (samples, channels) numpy array
    estimates_numpy = {}
    for target, estimate in estimates.items():
        estimates_numpy[target] = torch.squeeze(estimate).detach().cpu().numpy().T
    # FilesWriter writes one file per dict key:
    # vocals.wav, drums.wav, bass.wav and other.wav
    target_path = "target.wav"
    stempeg.write_stems(
        target_path,
        estimates_numpy,
        sample_rate=rate,
        writer=stempeg.FilesWriter(multiprocess=True, output_sample_rate=44100),
    )
    return 'vocals.wav', 'drums.wav', 'bass.wav', 'other.wav'
    
inputs = gr.inputs.Audio(label="Input Audio", type="filepath")
outputs = [gr.outputs.Audio(label="Vocals", type="file"),
           gr.outputs.Audio(label="Drums", type="file"),
           gr.outputs.Audio(label="Bass", type="file"),
           gr.outputs.Audio(label="Other", type="file")]


title = "Open-Unmix"
description = "Gradio demo for Open-Unmix, a reference implementation for music source separation. To use it, upload your own audio or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://joss.theoj.org/papers/10.21105/joss.01667'>Open-Unmix - A Reference Implementation for Music Source Separation</a> | <a href='https://github.com/sigsep/open-unmix-pytorch'>Github Repo</a></p>"

examples = [['test.wav']]

gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples).launch(enable_queue=True)
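
# Optional local sketch (not part of the original demo): to sanity-check the
# separation without going through the Gradio UI, one could call inference()
# directly on the downloaded example clip, e.g.:
#
#     stems = inference('test.wav')
#     print(stems)  # ('vocals.wav', 'drums.wav', 'bass.wav', 'other.wav')
#
# Note that launch(enable_queue=True) above blocks, so this would be run
# before the launch call or from a separate script/REPL.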