import os

os.system('pip install gradio==2.3.0a0')
os.system('pip freeze')

import sys
sys.path.append('.')

import gradio as gr

os.system('pip install -U torchtext==0.8.0')
# os.system('python setup.py install --install-dir .')

from scipy.io import wavfile

# Make the helper scripts executable and download the pretrained checkpoints.
os.system('chmod a+x ./separate_scripts/*.sh')
os.system('chmod a+x ./scripts/*.sh')
os.system('chmod a+x ./scripts/*/*.sh')
os.system('./separate_scripts/download_checkpoints.sh')
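
# Optional sanity check (a minimal sketch, not part of the original script):
# the two checkpoint paths are taken from the inference commands below;
# print a warning early if the download script did not produce them.
def _warn_if_checkpoints_missing():
    expected = [
        './downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth',
        './downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth',
    ]
    for path in expected:
        if not os.path.exists(path):
            print(f"Warning: expected checkpoint not found: {path}")

_warn_if_checkpoints_missing()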
def inference(audio):
    input_path = audio.name
    print(f"The audio file name is: {audio.name}")

    # Gradio passes the upload as a temporary file; if it is an mp3,
    # convert it to a 16-bit 44.1 kHz wav with ffmpeg first.
    if input_path.endswith(".mp3"):
        output_path = os.path.splitext(input_path)[0] + ".wav"
        os.system(f"ffmpeg -y -loglevel panic -i {input_path} -acodec pcm_s16le -ar 44100 {output_path}")
        input_path = output_path

    # Read the input and re-write it to the fixed path that the
    # separation commands below expect.
    rate, data = wavfile.read(input_path)
    wavfile.write('foo_left.wav', rate, data)

    # Separate vocals and accompaniment with the pretrained sub-band time ResUNet checkpoints.
    os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_vocals.mp3""")
    # os.system('./separate_scripts/separate_vocals.sh ' + audio.name + ' "sep_vocals.mp3"')
    os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/accompaniment-vocals,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_accompaniment.mp3""")
    # os.system('./separate_scripts/separate_accompaniment.sh ' + audio.name + ' "sep_accompaniment.mp3"')
    # os.system('python separate_scripts/separate.py --audio_path=' + audio.name + ' --source_type="accompaniment"')
    # os.system('python separate_scripts/separate.py --audio_path=' + audio.name + ' --source_type="vocals"')

    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
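
# Local usage sketch (an assumption, never called by the Space): a tiny
# stand-in object with a .name attribute mimics the Gradio "file" input,
# so inference() can be exercised from a shell without the web UI.
def _debug_run(path="example.wav"):
    class _NamedFile:
        def __init__(self, name):
            self.name = name
    return inference(_NamedFile(path))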
title = "Music Source Separation" | |
description = "Gradio demo for Music Source Separation. To use it, simply add your audio, or click one of the examples to load them. Currently supports .wav files. Read more at the links below." | |
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>" | |
examples = [['example.wav']] | |

gr.Interface(
    inference,
    gr.inputs.Audio(type="file", label="Input"),
    [
        gr.outputs.Audio(type="file", label="Vocals"),
        gr.outputs.Audio(type="file", label="Accompaniment"),
    ],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
).launch(debug=True)