Plachta committed (verified)
Commit a8d09fe · 1 Parent(s): a924296

Update app.py

Files changed (1)
  1. app.py +16 -29
app.py CHANGED
@@ -1,4 +1,3 @@
-import spaces
 import gradio as gr
 import torch
 import torchaudio
@@ -15,8 +14,6 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
                                                                  "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
                                                                  "config_dit_mel_seed_uvit_whisper_small_wavenet.yml")
-# dit_checkpoint_path = "E:/DiT_epoch_00018_step_801000.pth"
-# dit_config_path = "configs/config_dit_mel_seed_uvit_whisper_small_encoder_wavenet.yml"
 config = yaml.safe_load(open(dit_config_path, 'r'))
 model_params = recursive_munch(config['model_params'])
 model = build_model(model_params, stage='DiT')
@@ -48,19 +45,6 @@ bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_
 bigvgan_model.remove_weight_norm()
 bigvgan_model = bigvgan_model.eval().to(device)

-ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
-
-codec_config = yaml.safe_load(open(config_path))
-codec_model_params = recursive_munch(codec_config['model_params'])
-codec_encoder = build_model(codec_model_params, stage="codec")
-
-ckpt_params = torch.load(ckpt_path, map_location="cpu")
-
-for key in codec_encoder:
-    codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
-_ = [codec_encoder[key].eval() for key in codec_encoder]
-_ = [codec_encoder[key].to(device) for key in codec_encoder]
-
 # whisper
 from transformers import AutoFeatureExtractor, WhisperModel

@@ -134,16 +118,16 @@ def adjust_f0_semitones(f0_sequence, n_semitones):
 def crossfade(chunk1, chunk2, overlap):
     fade_out = np.cos(np.linspace(0, np.pi / 2, overlap)) ** 2
     fade_in = np.cos(np.linspace(np.pi / 2, 0, overlap)) ** 2
-    chunk2[:overlap] = chunk2[:overlap] * fade_in + chunk1[-overlap:] * fade_out
+    if len(chunk2) < overlap:
+        chunk2[:overlap] = chunk2[:overlap] * fade_in[:len(chunk2)] + (chunk1[-overlap:] * fade_out)[:len(chunk2)]
+    else:
+        chunk2[:overlap] = chunk2[:overlap] * fade_in + chunk1[-overlap:] * fade_out
     return chunk2

 # streaming and chunk processing related params
-max_context_window = sr // hop_length * 30
 overlap_frame_len = 16
-overlap_wave_len = overlap_frame_len * hop_length
 bitrate = "320k"

-@spaces.GPU
 @torch.no_grad()
 @torch.inference_mode()
 def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, f0_condition, auto_f0_adjust, pitch_shift):
@@ -151,6 +135,9 @@ def voice_conversion(source, target, diffusion_steps, length_adjust, inference_c
     mel_fn = to_mel if not f0_condition else to_mel_f0
     bigvgan_fn = bigvgan_model if not f0_condition else bigvgan_44k_model
     sr = 22050 if not f0_condition else 44100
+    hop_length = 256 if not f0_condition else 512
+    max_context_window = sr // hop_length * 30
+    overlap_wave_len = overlap_frame_len * hop_length
     # Load audio
     source_audio = librosa.load(source, sr=sr)[0]
     ref_audio = librosa.load(target, sr=sr)[0]
@@ -243,8 +230,8 @@ def voice_conversion(source, target, diffusion_steps, length_adjust, inference_c
     style2 = campplus_model(feat2.unsqueeze(0))

     if f0_condition:
-        F0_ori = rmvpe.infer_from_audio(ref_waves_16k[0], thred=0.5)
-        F0_alt = rmvpe.infer_from_audio(converted_waves_16k[0], thred=0.5)
+        F0_ori = rmvpe.infer_from_audio(ref_waves_16k[0], thred=0.03)
+        F0_alt = rmvpe.infer_from_audio(converted_waves_16k[0], thred=0.03)

         F0_ori = torch.from_numpy(F0_ori).to(device)[None]
         F0_alt = torch.from_numpy(F0_alt).to(device)[None]
@@ -283,7 +270,7 @@ def voice_conversion(source, target, diffusion_steps, length_adjust, inference_c
         chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
         is_last_chunk = processed_frames + max_source_window >= cond.size(1)
         cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1)
-        with torch.autocast(device_type='cuda', dtype=torch.float16):
+        with torch.autocast(device_type=device.type, dtype=torch.float16):
            # Voice Conversion
            vc_target = inference_module.cfm.inference(cat_condition,
                                                       torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
@@ -337,7 +324,7 @@ def voice_conversion(source, target, diffusion_steps, length_adjust, inference_c


 if __name__ == "__main__":
-    description = ("State-of-the-Art zero-shot voice conversion/singing voice conversion. For local deployment please check [GitHub repository](https://github.com/Plachtaa/seed-vc) "
+    description = ("Zero-shot voice conversion with in-context learning. For local deployment please check [GitHub repository](https://github.com/Plachtaa/seed-vc) "
                    "for details and updates.<br>Note that any reference audio will be forcefully clipped to 25s if beyond this length.<br> "
                    "If total duration of source and reference audio exceeds 30s, source audio will be processed in chunks.<br> "
                    "无需训练的 zero-shot 语音/歌声转换模型,若需本地部署查看[GitHub页面](https://github.com/Plachtaa/seed-vc)<br>"
@@ -345,7 +332,7 @@ if __name__ == "__main__":
     inputs = [
         gr.Audio(type="filepath", label="Source Audio / 源音频"),
         gr.Audio(type="filepath", label="Reference Audio / 参考音频"),
-        gr.Slider(minimum=1, maximum=200, value=25, step=1, label="Diffusion Steps / 扩散步数", info="25 by default, 50~100 for best quality / 默认为 25,50~100 为最佳质量"),
+        gr.Slider(minimum=1, maximum=200, value=10, step=1, label="Diffusion Steps / 扩散步数", info="10 by default, 50~100 for best quality / 默认为 10,50~100 为最佳质量"),
         gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="Length Adjust / 长度调整", info="<1.0 for speed-up speech, >1.0 for slow-down speech / <1.0 加速语速,>1.0 减慢语速"),
         gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Inference CFG Rate", info="has subtle influence / 有微小影响"),
         gr.Checkbox(label="Use F0 conditioned model / 启用F0输入", value=False, info="Must set to true for singing voice conversion / 歌声转换时必须勾选"),
@@ -355,11 +342,11 @@ if __name__ == "__main__":
     ]

     examples = [["examples/source/yae_0.wav", "examples/reference/dingzhen_0.wav", 25, 1.0, 0.7, False, True, 0],
-                ["examples/source/jay_0.wav", "examples/reference/azuma_0.wav", 25, 1.0, 0.7, False, True, 0],
+                ["examples/source/jay_0.wav", "examples/reference/azuma_0.wav", 25, 1.0, 0.7, True, True, 0],
                 ["examples/source/Wiz Khalifa,Charlie Puth - See You Again [vocals]_[cut_28sec].wav",
-                 "examples/reference/kobe_0.wav", 50, 1.0, 0.7, True, False, -6],
+                 "examples/reference/teio_0.wav", 25, 1.0, 0.7, True, False, 0],
                 ["examples/source/TECHNOPOLIS - 2085 [vocals]_[cut_14sec].wav",
-                 "examples/reference/trump_0.wav", 50, 1.0, 0.7, True, False, -12],
+                 "examples/reference/trump_0.wav", 25, 1.0, 0.7, True, False, -12],
                 ]

     outputs = [gr.Audio(label="Stream Output Audio / 流式输出", streaming=True, format='mp3'),
@@ -372,4 +359,4 @@ if __name__ == "__main__":
         title="Seed Voice Conversion",
         examples=examples,
         cache_examples=False,
-    ).launch()
+    ).launch()
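
For reference, a minimal standalone sketch of the updated crossfade path when the final chunk is shorter than the overlap window; the array lengths below are illustrative and not taken from the app.

import numpy as np

def crossfade(chunk1, chunk2, overlap):
    # same logic as the updated app.py: trim both fades when the last chunk is short
    fade_out = np.cos(np.linspace(0, np.pi / 2, overlap)) ** 2
    fade_in = np.cos(np.linspace(np.pi / 2, 0, overlap)) ** 2
    if len(chunk2) < overlap:
        chunk2[:overlap] = chunk2[:overlap] * fade_in[:len(chunk2)] + (chunk1[-overlap:] * fade_out)[:len(chunk2)]
    else:
        chunk2[:overlap] = chunk2[:overlap] * fade_in + chunk1[-overlap:] * fade_out
    return chunk2

prev = np.ones(8192, dtype=np.float32)   # previous output chunk (illustrative length)
tail = np.ones(1024, dtype=np.float32)   # final chunk, shorter than the 4096-sample overlap
out = crossfade(prev, tail, 4096)        # the unguarded one-liner would raise a numpy broadcasting error here
print(out.shape)                         # (1024,)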
 
 
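
And a small sketch of how the chunking parameters are now derived per call from the F0 flag instead of module-level globals; the helper name chunk_params is hypothetical, while the constants come from the diff above.

def chunk_params(f0_condition: bool, overlap_frame_len: int = 16):
    # mirrors the values set inside voice_conversion in the updated app.py
    sr = 22050 if not f0_condition else 44100          # output sample rate
    hop_length = 256 if not f0_condition else 512      # mel hop size for that rate
    max_context_window = sr // hop_length * 30         # roughly 30 s of mel frames per chunk
    overlap_wave_len = overlap_frame_len * hop_length  # crossfade length in samples
    return sr, hop_length, max_context_window, overlap_wave_len

print(chunk_params(False))  # (22050, 256, 2580, 4096)
print(chunk_params(True))   # (44100, 512, 2580, 8192)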