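# app.py: Gradio demo for music source separation.
# Splits an uploaded recording into vocals and accompaniment using the
# checkpoints fetched by separate_scripts/download_checkpoints.sh (see the
# paper and GitHub links in `article` below).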
import os
import sys

# Install the pinned Gradio version before importing it; runtime installs via
# os.system are a common pattern in Hugging Face Spaces.
os.system('pip install gradio==2.3.0a0')
os.system('pip freeze')

sys.path.append('.')
import gradio as gr

# Pin torchtext and download the pretrained separation checkpoints.
os.system('pip install -U torchtext==0.8.0')
os.system('./separate_scripts/download_checkpoints.sh')


def inference(audio):
    """Separate the uploaded audio into vocals and accompaniment tracks."""
    # Earlier approach using the standalone shell scripts:
    # os.system('./separate_scripts/separate_vocals.sh ' + audio.name + ' "sep_vocals.mp3"')
    # os.system('./separate_scripts/separate_accompaniment.sh ' + audio.name + ' "sep_accompaniment.mp3"')
    os.system(f'python separate_scripts/separate.py --audio_path={audio.name} --source_type="accompaniment"')
    os.system(f'python separate_scripts/separate.py --audio_path={audio.name} --source_type="vocals"')
    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'

title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply upload your audio or click one of the examples to load it. Currently only .wav files are supported. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"
examples = [['example.wav']]
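
# Build the Gradio interface: one audio input, two audio outputs (vocals and
# accompaniment), with queueing enabled for the long-running separation step.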
gr.Interface(
    inference,
    gr.inputs.Audio(type="file", label="Input"),
    [gr.outputs.Audio(type="file", label="Vocals"), gr.outputs.Audio(type="file", label="Accompaniment")],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples
).launch(debug=True)