Update app.py
app.py CHANGED
@@ -21,7 +21,8 @@ def list_files_tree(directory, indent=""):
 
 from huggingface_hub import snapshot_download
 print("Models...")
-models_id = """None1145/So-VITS-SVC-
+models_id = """None1145/So-VITS-SVC-Vulpisfoglia
+None1145/So-VITS-SVC-Lappland
 None1145/So-VITS-SVC-Lappland-the-Decadenza"""
 for model_id in models_id.split("\n"):
     if model_id in ["", " "]:
@@ -73,10 +74,11 @@ def load(speaker):
     model = Svc(models_info[sid]["model"], models_info[sid]["config"], cluster_model_path=models_info[sid]["cluster"], feature_retrieval=models_info[sid]["feature_retrieval"])
     return "Model loaded successfully", sid
 sid = speakers[0]
-load(sid)
+# load(sid)
 
 def vc_fn(input_audio, vc_transform, auto_f0,cluster_ratio, slice_db, noise_scale):
     global sid
+    load(sid)
     if input_audio is None:
         return "You need to upload an audio", None
     sampling_rate, audio = input_audio
@@ -100,7 +102,6 @@ with app:
     speaker = gr.Dropdown(label="Speaker", choices=speakers, value=speakers[0])
    model_submit = gr.Button("Load Model", variant="primary")
     model_output1 = gr.Textbox(label="Output Message")
-    load(sid)
     model_output2 = gr.Textbox(label="Speaker", value=sid)
     vc_input3 = gr.Audio(label="Upload Audio")
     vc_transform = gr.Number(label="Pitch Shift (integer, can be positive or negative, number of semitones, raising an octave is +12)", value=0)
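The first hunk only widens the `models_id` string: a newline-separated list of Hub repo IDs that the surrounding loop hands to `snapshot_download`. Below is a hedged sketch of that download loop; only `models_id`, the split/guard logic, and `snapshot_download` come from the diff, while the `local_paths` dict is illustrative and not part of the Space.

# Hedged sketch, not the Space's exact code: expanding the repo list from the
# first hunk into locally cached model folders.
from huggingface_hub import snapshot_download

models_id = """None1145/So-VITS-SVC-Vulpisfoglia
None1145/So-VITS-SVC-Lappland
None1145/So-VITS-SVC-Lappland-the-Decadenza"""

local_paths = {}
for model_id in models_id.split("\n"):
    if model_id in ["", " "]:
        continue  # skip blank entries, matching the guard shown in the diff
    # snapshot_download fetches (or reuses from cache) the whole repo and
    # returns its local directory path
    local_paths[model_id] = snapshot_download(repo_id=model_id)
print(local_paths)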
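The remaining two hunks move the model load: the module-level `load(sid)` becomes a comment, the `load(sid)` inside the `with app:` layout is dropped, and `vc_fn` now calls `load(sid)` itself, so no checkpoint is loaded until a conversion request arrives. A minimal, self-contained sketch of that control flow follows; the speaker list and placeholder model object are assumptions, and only the call placement mirrors the diff.

# Hedged sketch of the lazy-load change: load(sid) no longer runs at import
# time or while the Gradio UI is built; it runs inside vc_fn, per request.
speakers = ["None1145/So-VITS-SVC-Vulpisfoglia"]  # assumption: stand-in speaker list
model = None

def load(speaker):
    """Stand-in for the real load(): would build Svc(models_info[sid]["model"], ...)."""
    global model, sid
    sid = speaker
    model = object()  # placeholder for the heavyweight Svc instance
    return "Model loaded successfully", sid

sid = speakers[0]
# load(sid)  # the commit comments this startup call out

def vc_fn(input_audio, vc_transform=0):
    global sid
    load(sid)  # moved here by the commit: the model is loaded when a request arrives
    if input_audio is None:
        return "You need to upload an audio", None
    sampling_rate, audio = input_audio
    return f"converted at {sampling_rate} Hz, shift {vc_transform}", audio

print(vc_fn(None))                     # ("You need to upload an audio", None)
print(vc_fn((44100, [0.0, 0.1]), 12))  # model loads now, then the conversion runs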