import sys
sys.path.append('.')

import os
import gradio as gr

# Install the pinned torchtext version and download the pretrained
# checkpoints before the demo starts.
os.system('pip install -U torchtext==0.8.0')
os.system('./separate_scripts/download_checkpoints.sh')


def inference(audio):
    # Run the vocal and accompaniment separation scripts on the uploaded
    # file; the path is quoted so filenames containing spaces stay intact.
    os.system('./separate_scripts/separate_vocals.sh "{}" "sep_vocals.mp3"'.format(audio.name))
    os.system('./separate_scripts/separate_accompaniment.sh "{}" "sep_accompaniment.mp3"'.format(audio.name))
    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'


title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply upload your audio or click one of the examples to load them. Read more at the links below."
article = "Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation | Github Repo"
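# A minimal alternative sketch, not used by the demo above: calling the same
# separation scripts through subprocess.run with an argument list sidesteps
# shell quoting altogether. The name inference_subprocess is a hypothetical
# stand-in introduced only for illustration.
import subprocess  # needed only for this sketch


def inference_subprocess(audio):
    # Each script receives the uploaded file path and an output filename as
    # separate arguments; check=True raises CalledProcessError on failure.
    subprocess.run(['./separate_scripts/separate_vocals.sh', audio.name, 'sep_vocals.mp3'], check=True)
    subprocess.run(['./separate_scripts/separate_accompaniment.sh', audio.name, 'sep_accompaniment.mp3'], check=True)
    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'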
" gr.Interface( inference, gr.inputs.Audio(type="file", label="Input"), [gr.outputs.Audio(type="file", label="Vocals"),gr.outputs.Audio(type="file", label="Accompaniment")], title=title, description=description, article=article, enable_queue=True ).launch(debug=True)