Spaces:
Sleeping
Sleeping
helloWorld199
committed on
Commit
•
e4bdb20
1
Parent(s):
34564e2
Fixed bug + added comments
Browse files- src/main.py +18 -14
- src/mdx.py +2 -1
- src/webui.py +3 -8
src/main.py
CHANGED
@@ -191,6 +191,7 @@ def preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type,
|
|
191 |
|
192 |
return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path
|
193 |
|
|
|
194 |
def preprocess_vocals_only(song_input, mdx_model_params, song_id, is_webui, input_type, progress=None):
|
195 |
orig_song_path = song_input
|
196 |
|
@@ -217,6 +218,7 @@ def voice_change(voice_model, vocals_path, output_path, pitch_change, f0_method,
|
|
217 |
|
218 |
|
219 |
def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping):
|
|
|
220 |
output_path = f'{os.path.splitext(audio_path)[0]}_mixed_covervocals.wav'
|
221 |
|
222 |
# Initialize audio effects plugins
|
@@ -245,6 +247,8 @@ def combine_audio(audio_paths, output_path, main_gain, backup_gain, inst_gain, o
|
|
245 |
instrumental_audio = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain
|
246 |
main_vocal_audio.overlay(backup_vocal_audio).overlay(instrumental_audio).export(output_path, format=output_format)
|
247 |
|
|
|
|
|
248 |
def vocal_only_pipeline(song_input, voice_model, pitch_change, is_webui=0,index_rate=0.5, filter_radius=3,
|
249 |
rms_mix_rate=0.25, f0_method='rmvpe', crepe_hop_length=128, protect=0.33, pitch_change_all=0,
|
250 |
reverb_rm_size=0.15, reverb_wet=0.2, reverb_dry=0.8, reverb_damping=0.7, output_format='mp3',progress=gr.Progress()):
|
@@ -266,7 +270,19 @@ def vocal_only_pipeline(song_input, voice_model, pitch_change, is_webui=0,index_
|
|
266 |
song_dir = os.path.join(output_dir, song_id)
|
267 |
os.makedirs(song_dir)
|
268 |
|
269 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
270 |
|
271 |
pitch_change = pitch_change * 12 + pitch_change_all
|
272 |
ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}_{f0_method}{"" if f0_method != "mangio-crepe" else f"_{crepe_hop_length}"}.wav')
|
@@ -336,6 +352,7 @@ def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files,
|
|
336 |
|
337 |
pitch_change = pitch_change * 12 + pitch_change_all
|
338 |
ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}_{f0_method}{"" if f0_method != "mangio-crepe" else f"_{crepe_hop_length}"}.wav')
|
|
|
339 |
ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver)_cover.{output_format}')
|
340 |
|
341 |
if not os.path.exists(ai_vocals_path):
|
@@ -362,19 +379,6 @@ def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files,
|
|
362 |
if file and os.path.exists(file):
|
363 |
os.remove(file)
|
364 |
|
365 |
-
# Add _stemname to each stem
|
366 |
-
#ai_cover_path = add_stem_name(ai_cover_path, "_cover")
|
367 |
-
display_progress(f'[~] ai_cover_path FINAL PATH {ai_cover_path}', 0.9, is_webui, progress)
|
368 |
-
time.sleep(5)
|
369 |
-
#vocals_path = add_stem_name(vocals_path, "_origvocals")
|
370 |
-
display_progress(f'[~] vocals_path FINAL PATH {vocals_path}', 0.9, is_webui, progress)
|
371 |
-
time.sleep(5)
|
372 |
-
#instrumentals_path = add_stem_name(instrumentals_path, "_originstr")
|
373 |
-
display_progress(f'[~] instrumentals_path FINAL PATH {instrumentals_path}', 0.9, is_webui, progress)
|
374 |
-
time.sleep(5)
|
375 |
-
#ai_vocals_mixed_path = add_stem_name(ai_vocals_mixed_path, "_covervocals")
|
376 |
-
display_progress(f'[~] ai_vocals_mixed_path FINAL PATH {ai_vocals_mixed_path}', 0.9, is_webui, progress)
|
377 |
-
time.sleep(5)
|
378 |
|
379 |
# Returning the stems: AI cover, original vocal, original instrumental, AI generated vocal
|
380 |
|
|
|
191 |
|
192 |
return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path
|
193 |
|
194 |
+
# Function to preprocess vocals only, i.e. apply just dereverb process.
|
195 |
def preprocess_vocals_only(song_input, mdx_model_params, song_id, is_webui, input_type, progress=None):
|
196 |
orig_song_path = song_input
|
197 |
|
|
|
218 |
|
219 |
|
220 |
def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping):
|
221 |
+
# Added _covervocals at the end of filename
|
222 |
output_path = f'{os.path.splitext(audio_path)[0]}_mixed_covervocals.wav'
|
223 |
|
224 |
# Initialize audio effects plugins
|
|
|
247 |
instrumental_audio = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain
|
248 |
main_vocal_audio.overlay(backup_vocal_audio).overlay(instrumental_audio).export(output_path, format=output_format)
|
249 |
|
250 |
+
# Function defining the pipeline to create the AI cover of an audio containing only voice information.
|
251 |
+
# Returns the path of the cover vocal.
|
252 |
def vocal_only_pipeline(song_input, voice_model, pitch_change, is_webui=0,index_rate=0.5, filter_radius=3,
|
253 |
rms_mix_rate=0.25, f0_method='rmvpe', crepe_hop_length=128, protect=0.33, pitch_change_all=0,
|
254 |
reverb_rm_size=0.15, reverb_wet=0.2, reverb_dry=0.8, reverb_damping=0.7, output_format='mp3',progress=gr.Progress()):
|
|
|
270 |
song_dir = os.path.join(output_dir, song_id)
|
271 |
os.makedirs(song_dir)
|
272 |
|
273 |
+
if not os.path.exists(song_dir):
|
274 |
+
os.makedirs(song_dir)
|
275 |
+
orig_song_path, vocals_path = preprocess_vocals_only(song_input, mdx_model_params, song_id, is_webui, input_type, progress)
|
276 |
+
else:
|
277 |
+
vocals_path = None
|
278 |
+
paths = get_audio_paths(song_dir)
|
279 |
+
|
280 |
+
# if any of the audio files aren't available or keep intermediate files, rerun preprocess
|
281 |
+
if any(path is None for path in paths):
|
282 |
+
orig_song_path, vocals_path= preprocess_vocals_only(song_input, mdx_model_params, song_id, is_webui, input_type, progress)
|
283 |
+
else:
|
284 |
+
orig_song_path = paths
|
285 |
+
#orig_song_path, vocals_path = preprocess_vocals_only(song_input, mdx_model_params, song_id, is_webui, input_type, progress)
|
286 |
|
287 |
pitch_change = pitch_change * 12 + pitch_change_all
|
288 |
ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}_{f0_method}{"" if f0_method != "mangio-crepe" else f"_{crepe_hop_length}"}.wav')
|
|
|
352 |
|
353 |
pitch_change = pitch_change * 12 + pitch_change_all
|
354 |
ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}_{f0_method}{"" if f0_method != "mangio-crepe" else f"_{crepe_hop_length}"}.wav')
|
355 |
+
# Added _cover at the end of filename
|
356 |
ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver)_cover.{output_format}')
|
357 |
|
358 |
if not os.path.exists(ai_vocals_path):
|
|
|
379 |
if file and os.path.exists(file):
|
380 |
os.remove(file)
|
381 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
382 |
|
383 |
# Returning the stems: AI cover, original vocal, original instrumental, AI generated vocal
|
384 |
|
src/mdx.py
CHANGED
@@ -238,7 +238,8 @@ class MDX:
|
|
238 |
assert len(processed_batches) == len(waves), 'Incomplete processed batches, please reduce batch size!'
|
239 |
return self.segment(processed_batches, True, chunk)
|
240 |
|
241 |
-
|
|
|
242 |
def run_mdx(model_params, output_dir, model_path, filename, exclude_main=False, exclude_inversion=False, suffix=None, invert_suffix=None, denoise=False, keep_orig=True, m_threads=2, _stemname1="", _stemname2=""):
|
243 |
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
|
244 |
|
|
|
238 |
assert len(processed_batches) == len(waves), 'Incomplete processed batches, please reduce batch size!'
|
239 |
return self.segment(processed_batches, True, chunk)
|
240 |
|
241 |
+
# Added _stemname1 and parameters to modify the path to add "_origvocals" and "_originstr",
|
242 |
+
# see preprocess_song() in main.py
|
243 |
def run_mdx(model_params, output_dir, model_path, filename, exclude_main=False, exclude_inversion=False, suffix=None, invert_suffix=None, denoise=False, keep_orig=True, m_threads=2, _stemname1="", _stemname2=""):
|
244 |
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
|
245 |
|
src/webui.py
CHANGED
@@ -245,6 +245,8 @@ if __name__ == '__main__':
|
|
245 |
protect, f0_method, crepe_hop_length, pitch_all, reverb_rm_size, reverb_wet,
|
246 |
reverb_dry, reverb_damping, output_format, ai_cover])
|
247 |
|
|
|
|
|
248 |
with gr.Row():
|
249 |
with gr.Column():
|
250 |
rvc_model = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "AICoverGen --> rvc_models". After new models are added into this folder, click the refresh button')
|
@@ -254,10 +256,6 @@ if __name__ == '__main__':
|
|
254 |
local_file = gr.File(label='Audio file')
|
255 |
song_input_file = gr.UploadButton('Upload 📂', file_types=['audio'], variant='primary')
|
256 |
song_input_file.upload(process_file_upload, inputs=[song_input_file], outputs=[local_file, song_input])
|
257 |
-
|
258 |
-
with gr.Column():
|
259 |
-
pitch = gr.Slider(-3, 3, value=0, step=1, label='Pitch Change (Vocals ONLY)', info='Generally, use 1 for male to female conversions and -1 for vice-versa. (Octaves)')
|
260 |
-
pitch_all = gr.Slider(-12, 12, value=0, step=1, label='Overall Pitch Change', info='Changes pitch/key of vocals and instrumentals together. Altering this slightly reduces sound quality. (Semitones)')
|
261 |
|
262 |
with gr.Row():
|
263 |
ai_vocals =gr.Audio(label='ai_vocals', show_share_button=False)
|
@@ -268,10 +266,7 @@ if __name__ == '__main__':
|
|
268 |
inputs=[song_input, rvc_model, pitch, is_webui, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
|
269 |
protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping],
|
270 |
outputs=[ai_vocals])
|
271 |
-
|
272 |
-
# outputs=[pitch, main_gain, backup_gain, inst_gain, index_rate, filter_radius, rms_mix_rate,
|
273 |
-
# protect, f0_method, crepe_hop_length, pitch_all, reverb_rm_size, reverb_wet,
|
274 |
-
# reverb_dry, reverb_damping, output_format, ai_cover])
|
275 |
|
276 |
# Download tab
|
277 |
with gr.Tab('Download model'):
|
|
|
245 |
protect, f0_method, crepe_hop_length, pitch_all, reverb_rm_size, reverb_wet,
|
246 |
reverb_dry, reverb_damping, output_format, ai_cover])
|
247 |
|
248 |
+
gr.Label('If you want to modify just voice content...', show_label=False)
|
249 |
+
|
250 |
with gr.Row():
|
251 |
with gr.Column():
|
252 |
rvc_model = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "AICoverGen --> rvc_models". After new models are added into this folder, click the refresh button')
|
|
|
256 |
local_file = gr.File(label='Audio file')
|
257 |
song_input_file = gr.UploadButton('Upload 📂', file_types=['audio'], variant='primary')
|
258 |
song_input_file.upload(process_file_upload, inputs=[song_input_file], outputs=[local_file, song_input])
|
|
|
|
|
|
|
|
|
259 |
|
260 |
with gr.Row():
|
261 |
ai_vocals =gr.Audio(label='ai_vocals', show_share_button=False)
|
|
|
266 |
inputs=[song_input, rvc_model, pitch, is_webui, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
|
267 |
protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping],
|
268 |
outputs=[ai_vocals])
|
269 |
+
|
|
|
|
|
|
|
270 |
|
271 |
# Download tab
|
272 |
with gr.Tab('Download model'):
|