import os
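# Install a pinned Gradio build at startup and log the installed packages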
os.system('pip install gradio==2.3.0a0')
os.system('pip freeze')
import sys
sys.path.append('.')
import gradio as gr
os.system('pip install -U torchtext==0.8.0')
#os.system('python setup.py install --install-dir .')
from scipy.io import wavfile
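# Make the separation and training scripts executable, then fetch the pretrained checkpoints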
os.system('chmod a+x ./separate_scripts/*.sh')
os.system('chmod a+x ./scripts/*.sh')
os.system('chmod a+x ./scripts/*/*.sh')
os.system('./separate_scripts/download_checkpoints.sh')
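# inference(): convert the uploaded audio to 44.1 kHz 16-bit WAV with ffmpeg, run
# bytesep/inference.py twice (vocals and accompaniment checkpoints), and return both tracks.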
def inference(audio):
    input_path = audio.name
    print(f"The audio file name is: {audio.name}")
    output_path = os.path.splitext(input_path)[0] + ".wav"
    # Convert the upload to 16-bit PCM WAV at 44.1 kHz
    os.system(f'ffmpeg -y -loglevel panic -i "{input_path}" -acodec pcm_s16le -ar 44100 "{output_path}"')
    try:
        # Read the converted file to get the sample rate and data
        rate, data = wavfile.read(output_path)
    except Exception:
        # If the conversion failed, fall back to the original upload
        rate, data = wavfile.read(input_path)
    # Save the audio that the separation models will read
    wavfile.write('foo_left.wav', rate, data)

    # Separate vocals with the sub-band time-domain ResUNet checkpoint
    os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_vocals.mp3""")
    #os.system('./separate_scripts/separate_vocals.sh ' + audio.name + ' "sep_vocals.mp3"')

    # Separate accompaniment with the matching checkpoint
    os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/accompaniment-vocals,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_accompaniment.mp3""")
    #os.system('./separate_scripts/separate_accompaniment.sh ' + audio.name + ' "sep_accompaniment.mp3"')
    #os.system('python separate_scripts/separate.py --audio_path=' + audio.name + ' --source_type="accompaniment"')
    #os.system('python separate_scripts/separate.py --audio_path=' + audio.name + ' --source_type="vocals"')

    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply upload your audio or click one of the examples to load it. Currently supports .wav files. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"
examples = [['example.wav']]
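# Build the Gradio demo: one audio input, two audio outputs (vocals and accompaniment)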
gr.Interface(
    inference,
    gr.inputs.Audio(type="file", label="Input"),
    [gr.outputs.Audio(type="file", label="Vocals"), gr.outputs.Audio(type="file", label="Accompaniment")],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples
).launch(debug=True)