Ahsen Khaliq committed on
Commit
0648b6d
1 Parent(s): dbcd721

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -3
app.py CHANGED
@@ -6,19 +6,34 @@ sys.path.append('.')
6
  import gradio as gr
7
  os.system('pip install -U torchtext==0.8.0')
8
  #os.system('python setup.py install --install-dir .')
 
 
 
 
9
 
10
  os.system('./separate_scripts/download_checkpoints.sh')
11
 
12
  def inference(audio):
13
- os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth --audio_path="""+audio.name+""" --output_path=sep_vocals.mp3""")
 
 
 
 
 
 
 
 
 
 
 
14
  #os.system('./separate_scripts/separate_vocals.sh ' + audio.name + ' "sep_vocals.mp3"')
15
- os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/accompaniment-vocals,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth --audio_path="""+audio.name+""" --output_path=sep_accompaniment.mp3""")
16
  #os.system('./separate_scripts/separate_accompaniment.sh ' + audio.name + ' "sep_accompaniment.mp3"')
17
  #os.system('python separate_scripts/separate.py --audio_path=' +audio.name+' --source_type="accompaniment"')
18
  #os.system('python separate_scripts/separate.py --audio_path=' +audio.name+' --source_type="vocals"')
19
  return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
20
  title = "Music Source Separation"
21
- description = "Gradio demo for Music Source Separation. To use it, simply add your audio, or click one of the examples to load them. Please trim audio files to 10 seconds or less for faster inference time on cpu. Read more at the links below."
22
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"
23
 
24
  examples = [['example.wav']]
 
6
  import gradio as gr
7
  os.system('pip install -U torchtext==0.8.0')
8
  #os.system('python setup.py install --install-dir .')
9
+ from scipy.io import wavfile
10
+
11
+ # the timestamp to split at (in seconds)
12
+ split_at_timestamp = 6
13
 
14
  os.system('./separate_scripts/download_checkpoints.sh')
15
 
16
  def inference(audio):
17
+ # read the file and get the sample rate and data
18
+ rate, data = wavfile.read(audio.name)
19
+
20
+ # get the frame to split at
21
+ split_at_frame = rate * split_at_timestamp
22
+
23
+ # split
24
+ left_data, right_data = data[:split_at_frame-1], data[split_at_frame:] # split
25
+
26
+ # save the result
27
+ wavfile.write('foo_left.wav', rate, left_data)
28
+ os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_vocals.mp3""")
29
  #os.system('./separate_scripts/separate_vocals.sh ' + audio.name + ' "sep_vocals.mp3"')
30
+ os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/accompaniment-vocals,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_accompaniment.mp3""")
31
  #os.system('./separate_scripts/separate_accompaniment.sh ' + audio.name + ' "sep_accompaniment.mp3"')
32
  #os.system('python separate_scripts/separate.py --audio_path=' +audio.name+' --source_type="accompaniment"')
33
  #os.system('python separate_scripts/separate.py --audio_path=' +audio.name+' --source_type="vocals"')
34
  return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
35
  title = "Music Source Separation"
36
+ description = "Gradio demo for Music Source Separation. To use it, simply add your audio, or click one of the examples to load them. Please trim audio files to 10 seconds or less for faster inference time on cpu. Currently supports .wav files. Read more at the links below."
37
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"
38
 
39
  examples = [['example.wav']]