fffiloni committed
Commit e1c09af
1 Parent(s): c1d97d2

Update app.py

Files changed (1)
  1. app.py +15 -7
app.py CHANGED
@@ -19,12 +19,19 @@ from audiocraft.data.audio import audio_write
 
 MODEL = None
 
-def split_process(audio):
-    os.makedirs("out", exist_ok=True)
-    write('test.wav', audio[0], audio[1])
-    os.system("python3 -m demucs.separate -n mdx_extra_q -d cpu test.wav -o out")
-    #return "./out/mdx_extra_q/test/vocals.wav","./out/mdx_extra_q/test/bass.wav","./out/mdx_extra_q/test/drums.wav","./out/mdx_extra_q/test/other.wav"
-    return "./out/mdx_extra_q/test/vocals.wav"
+def split_process(audio, chosen_out_track):
+    os.makedirs("out", exist_ok=True)
+    write('test.wav', audio[0], audio[1])
+    os.system("python3 -m demucs.separate -n mdx_extra_q -d cpu test.wav -o out")
+    #return "./out/mdx_extra_q/test/vocals.wav","./out/mdx_extra_q/test/bass.wav","./out/mdx_extra_q/test/drums.wav","./out/mdx_extra_q/test/other.wav"
+    if chosen_out_track == "vocals":
+        return "./out/mdx_extra_q/test/vocals.wav"
+    elif chosen_out_track == "bass":
+        return "./out/mdx_extra_q/test/bass.wav"
+    elif chosen_out_track == "drums":
+        return "./out/mdx_extra_q/test/drums.wav"
+    elif chosen_out_track == "other":
+        return "./out/mdx_extra_q/test/other.wav"
 
 def load_model(version):
     print("Loading model", version)
@@ -88,6 +95,7 @@ with gr.Blocks() as demo:
     with gr.Column():
         with gr.Row():
             uploaded_sound = gr.Audio(type="numpy", label="Input", source="microphone")
+            chosen_track = gr.Radio(["vocals", "bass", "drums", "other"], label="Track", info="Which track do you want to mashup ?", value="vocals")
         load_sound_btn = gr.Button('Load sound')
         #split_vocals = gr.Audio(type="filepath", label="Vocals")
         #split_bass = gr.Audio(type="filepath", label="Bass")
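Note on the hunk above: when a gr.Radio component is listed in an event's inputs, the callback receives the selected choice as a plain string, which is what lets split_process compare chosen_out_track against "vocals", "bass", and so on. A standalone sketch (hypothetical names, assuming the same Gradio 3.x API as the source="microphone" call above):

import gradio as gr

def show_choice(track):
    # The Radio value arrives as the selected choice string, e.g. "drums".
    return f"selected stem: {track}"

with gr.Blocks() as sketch:
    track = gr.Radio(["vocals", "bass", "drums", "other"], value="vocals", label="Track")
    btn = gr.Button("Check track")
    result = gr.Textbox(label="Result")
    btn.click(show_choice, inputs=[track], outputs=[result])

# sketch.launch()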
@@ -109,7 +117,7 @@
         # cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
     with gr.Column():
         output = gr.Audio(label="Generated Music")
-    load_sound_btn.click(split_process, inputs=[uploaded_sound], outputs=[melody])
+    load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody])
     submit.click(predict, inputs=[music_prompt, melody, duration], outputs=[output])
 
     gr.Markdown(
 
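General note: demucs names its output folder after the input file's basename, so with a fixed test.wav every request writes to out/mdx_extra_q/test/ and concurrent users would overwrite one another's stems. A sketch of a per-request variant, not part of this commit (split_unique and the scipy import are assumptions):

import os
import subprocess
import uuid
from scipy.io.wavfile import write  # assumption, as in the sketch above

def split_unique(audio, chosen_out_track):
    # Hypothetical per-request variant: a unique basename keeps concurrent
    # requests from clobbering out/mdx_extra_q/test/*.
    name = uuid.uuid4().hex
    write(f"{name}.wav", audio[0], audio[1])
    subprocess.run(
        ["python3", "-m", "demucs.separate", "-n", "mdx_extra_q", "-d", "cpu",
         f"{name}.wav", "-o", "out"],
        check=True,
    )
    # demucs mirrors the input basename in its output directory.
    return os.path.join("out", "mdx_extra_q", name, f"{chosen_out_track}.wav")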