supports 24kHz
- app.py (+20 -6)
- checkpoints/freevc-24.pth (+3 -0)
- configs/freevc-24.json (+54 -0)
app.py
CHANGED
@@ -28,6 +28,15 @@ _ = freevc.eval()
 _ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
 smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')
 
+print("Loading FreeVC(24k)...")
+hps = utils.get_hparams_from_file("configs/freevc-24.json")
+freevc_24 = SynthesizerTrn(
+    hps.data.filter_length // 2 + 1,
+    hps.train.segment_size // hps.data.hop_length,
+    **hps.model).to(device)
+_ = freevc_24.eval()
+_ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)
+
 print("Loading FreeVC-s...")
 hps = utils.get_hparams_from_file("configs/freevc-s.json")
 freevc_s = SynthesizerTrn(
@@ -45,7 +54,7 @@ def convert(model, src, tgt):
         # tgt
         wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
         wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
-        if model == "FreeVC":
+        if model == "FreeVC" or model == "FreeVC (24kHz)":
             g_tgt = smodel.embed_utterance(wav_tgt)
             g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
         else:
@@ -67,23 +76,28 @@ def convert(model, src, tgt):
         # infer
         if model == "FreeVC":
            audio = freevc.infer(c, g=g_tgt)
-        else:
+        elif model == "FreeVC-s":
            audio = freevc_s.infer(c, mel=mel_tgt)
+        else:
+            audio = freevc_24.infer(c, g=g_tgt)
         audio = audio[0][0].data.cpu().float().numpy()
-        write("out.wav", hps.data.sampling_rate, audio)
+        if model == "FreeVC" or model == "FreeVC-s":
+            write("out.wav", hps.data.sampling_rate, audio)
+        else:
+            write("out.wav", 24000, audio)
     out = "out.wav"
     return out
 
-model = gr.Dropdown(choices=["FreeVC", "FreeVC-s"], value="FreeVC", type="value", label="Model")
+model = gr.Dropdown(choices=["FreeVC", "FreeVC-s", "FreeVC (24kHz)"], value="FreeVC", type="value", label="Model")
 audio1 = gr.inputs.Audio(label="Source Audio", type='filepath')
 audio2 = gr.inputs.Audio(label="Reference Audio", type='filepath')
 inputs = [model, audio1, audio2]
 outputs = gr.outputs.Audio(label="Output Audio", type='filepath')
 
 title = "FreeVC"
-description = "Gradio Demo for FreeVC: Towards High-Quality Text-Free One-Shot Voice Conversion. To use it, simply upload your audio, or click the example to load. Read more at the links below. Note: It seems that the WavLM checkpoint in HuggingFace is a little different from the one used to train FreeVC, which may degrade the performance a bit."
+description = "Gradio Demo for FreeVC: Towards High-Quality Text-Free One-Shot Voice Conversion. To use it, simply upload your audio, or click the example to load. Read more at the links below. Note: It seems that the WavLM checkpoint in HuggingFace is a little different from the one used to train FreeVC, which may degrade the performance a bit."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2210.15418' target='_blank'>Paper</a> | <a href='https://github.com/OlaWod/FreeVC' target='_blank'>Github Repo</a></p>"
 
-examples=[["FreeVC", 'p225_001.wav', 'p226_002.wav'], ["FreeVC-s", 'p226_002.wav', 'p225_001.wav']]
+examples=[["FreeVC", 'p225_001.wav', 'p226_002.wav'], ["FreeVC-s", 'p226_002.wav', 'p225_001.wav'], ["FreeVC (24kHz)", 'p225_001.wav', 'p226_002.wav']]
 
 gr.Interface(convert, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
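One practical consequence of the new branch: the FreeVC and FreeVC-s paths still write out.wav at hps.data.sampling_rate, while the new path writes at a fixed 24000 Hz. A minimal sketch of that mapping, assuming the 16 kHz configs use sampling_rate 16000 just as configs/freevc-24.json below does:

# Output sample rate per dropdown choice, mirroring the branch added to convert().
# Assumption: hps.data.sampling_rate is 16000 for the FreeVC/FreeVC-s configs.
def output_sample_rate(model: str, feature_sr: int = 16000) -> int:
    if model in ("FreeVC", "FreeVC-s"):
        return feature_sr  # write("out.wav", hps.data.sampling_rate, audio)
    return 24000           # the "FreeVC (24kHz)" branch hardcodes 24000

assert output_sample_rate("FreeVC (24kHz)") == 24000
assert output_sample_rate("FreeVC-s") == 16000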
checkpoints/freevc-24.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ff0ebf6bde90bf3f13518f49c204348d2f683ffb9fd31b24f59a2b302998862
+size 472644351
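Because the checkpoint is stored with Git LFS, only this three-line pointer lives in the repository; the ~472 MB payload is fetched separately. A small sketch for verifying a downloaded checkpoints/freevc-24.pth against the pointer (the expected digest and size are taken verbatim from the lines above):

import hashlib
import os

path = "checkpoints/freevc-24.pth"
expected_oid = "2ff0ebf6bde90bf3f13518f49c204348d2f683ffb9fd31b24f59a2b302998862"
expected_size = 472644351

# A pointer-only or partial download fails the cheap size check first.
assert os.path.getsize(path) == expected_size, "size mismatch"
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("checkpoint matches the LFS pointer")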
configs/freevc-24.json
ADDED
@@ -0,0 +1,54 @@
+{
+  "train": {
+    "log_interval": 200,
+    "eval_interval": 10000,
+    "seed": 1234,
+    "epochs": 10000,
+    "learning_rate": 2e-4,
+    "betas": [0.8, 0.99],
+    "eps": 1e-9,
+    "batch_size": 64,
+    "fp16_run": false,
+    "lr_decay": 0.999875,
+    "segment_size": 8640,
+    "init_lr_ratio": 1,
+    "warmup_epochs": 0,
+    "c_mel": 45,
+    "c_kl": 1.0,
+    "use_sr": true,
+    "max_speclen": 128,
+    "port": "8008"
+  },
+  "data": {
+    "training_files":"filelists/train.txt",
+    "validation_files":"filelists/val.txt",
+    "max_wav_value": 32768.0,
+    "sampling_rate": 16000,
+    "filter_length": 1280,
+    "hop_length": 320,
+    "win_length": 1280,
+    "n_mel_channels": 80,
+    "mel_fmin": 0.0,
+    "mel_fmax": null
+  },
+  "model": {
+    "inter_channels": 192,
+    "hidden_channels": 192,
+    "filter_channels": 768,
+    "n_heads": 2,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0.1,
+    "resblock": "1",
+    "resblock_kernel_sizes": [3,7,11],
+    "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+    "upsample_rates": [10,6,4,2],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [16,16,4,4],
+    "n_layers_q": 3,
+    "use_spectral_norm": false,
+    "gin_channels": 256,
+    "ssl_dim": 1024,
+    "use_spk": true
+  }
+}
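The numbers in this config are consistent with the 24 kHz output in the commit title: features come in at a sampling_rate of 16000 with a hop_length of 320, i.e. 50 frames per second, and the decoder's upsample_rates multiply to 10 * 6 * 4 * 2 = 480 samples per frame, so 50 * 480 = 24000 Hz. A one-line check of that arithmetic, assuming a HiFi-GAN-style decoder (as in VITS/FreeVC) that emits prod(upsample_rates) samples per input frame:

from math import prod

sampling_rate, hop_length = 16000, 320  # from the "data" section above
upsample_rates = [10, 6, 4, 2]          # from the "model" section above
assert sampling_rate / hop_length * prod(upsample_rates) == 24000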