Hugging Face Space — running on ZeroGPU hardware
Commit: "Update app.py" (Browse files)
Changed file: app.py
@@ -5,6 +5,7 @@ import librosa
 from modules.commons import build_model, load_checkpoint, recursive_munch
 import yaml
 from hf_utils import load_custom_model_from_hf
+import spaces

 # Load model and configuration
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -81,6 +82,7 @@ from modules.audio import mel_spectrogram

 to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)

+@spaces.GPU
 @torch.no_grad()
 @torch.inference_mode()
 def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, n_quantizers):
@@ -192,4 +194,5 @@ if __name__ == "__main__":
         outputs=outputs,
         title="Seed Voice Conversion",
         examples=examples,
+        cache_examples=False,
     ).launch()
|
|
|
Updated file, lines 5–11 (added line marked with +):

   from modules.commons import build_model, load_checkpoint, recursive_munch
   import yaml
   from hf_utils import load_custom_model_from_hf
 + import spaces

   # Load model and configuration
   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Updated file, lines 82–88 (added line marked with +):

   to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)

 + @spaces.GPU
   @torch.no_grad()
   @torch.inference_mode()
   def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, n_quantizers):
Updated file, lines 194–198 (added line marked with +):

       outputs=outputs,
       title="Seed Voice Conversion",
       examples=examples,
 +     cache_examples=False,
   ).launch()