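"""RVC (Retrieval-based Voice Conversion) demo for Hugging Face Spaces.

Loads every enabled model listed under weights/ and builds a tabbed Gradio UI
that converts an uploaded voice recording (or edge-tts synthesized speech)
into each model's voice. Run directly (conventionally saved as app.py on a
standard Space layout).
"""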
import os
import json
import traceback
import logging
import gradio as gr
import numpy as np
import librosa
import torch
import asyncio
import edge_tts
from datetime import datetime
from fairseq import checkpoint_utils
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from vc_infer_pipeline import VC
from config import Config
config = Config()
logging.getLogger("numba").setLevel(logging.WARNING)
limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
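
# Factory that binds one loaded model (net_g), its conversion pipeline (vc),
# target sample rate, f0 flag, and feature-index path into a per-tab Gradio
# callback, so every model tab gets an independent handler that shares the
# global hubert_model.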
def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index):
    def vc_fn(
        input_audio,
        f0_up_key,
        f0_method,
        index_rate,
        tts_mode,
        tts_text,
        tts_voice
    ):
        try:
            if tts_mode:
                # Validate before calling len(): tts_text may be None when nothing was entered.
                if tts_text is None or tts_voice is None:
                    raise gr.Error("You need to enter text and select a voice")
                if limitation and len(tts_text) > 100:
                    raise gr.Error("Text is too long")
                # Voices are listed as "<ShortName>-<Gender>"; strip the gender suffix.
                asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
                audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
            else:
                if input_audio is None:
                    raise gr.Error("You need to upload an audio")
                sampling_rate, audio = input_audio
                duration = audio.shape[0] / sampling_rate
                if limitation and duration > 20:
                    raise gr.Error("Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.")
                # Normalize integer PCM from Gradio to float32 in [-1, 1].
                audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
                if len(audio.shape) > 1:
                    audio = librosa.to_mono(audio.transpose(1, 0))
                if sampling_rate != 16000:
                    audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
            times = [0, 0, 0]  # filled in by vc.pipeline with npy/f0/infer timings
            f0_up_key = int(f0_up_key)
            audio_opt = vc.pipeline(
                hubert_model,
                net_g,
                0,
                audio,
                times,
                f0_up_key,
                f0_method,
                file_index,
                index_rate,
                if_f0,
                f0_file=None,
            )
            print(
                f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
            )
            return (tgt_sr, audio_opt)
        except gr.Error:
            raise
        except Exception:
            # The UI has a single Audio output, so surface failures as a gr.Error
            # instead of returning a mismatched (message, audio) tuple.
            print(traceback.format_exc())
            raise gr.Error("Conversion failed; see the Space logs for the traceback.")
    return vc_fn
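
# Load the HuBERT content encoder (hubert_base.pt) once via fairseq and keep
# it as a module-level global; every vc_fn call reuses it for feature
# extraction.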
def load_hubert():
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(config.device)
    if config.is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    hubert_model.eval()
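
# Toggle UI visibility: TTS mode hides the audio upload and shows the text box
# and voice dropdown; returns update objects for the three components.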
def change_to_tts_mode(tts_mode):
    if tts_mode:
        return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
    else:
        return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
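
# The loaders below expect this layout under weights/ (field names are taken
# from the lookups in the code; the concrete values here are illustrative only):
#
#   weights/folder_info.json:
#     {"my_category": {"enable": true, "title": "My Category",
#                      "folder_path": "my_category", "description": "..."}}
#
#   weights/<folder_path>/model_info.json:
#     {"my_model": {"enable": true, "title": "My Model", "author": "...",
#                   "cover": "cover.png",
#                   "feature_retrieval_library": "added.index"}}
#
#   weights/<folder_path>/<model_name>/<model_name>.pth  (RVC checkpoint)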
if __name__ == '__main__':
    load_hubert()
    categories = []
    tts_voice_list = asyncio.run(edge_tts.list_voices())
    voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
    with open("weights/folder_info.json", "r", encoding="utf-8") as f:
        folder_info = json.load(f)
    for category_name, category_info in folder_info.items():
        if not category_info['enable']:
            continue
        category_title = category_info['title']
        category_folder = category_info['folder_path']
        description = category_info['description']
        models = []
        with open(f"weights/{category_folder}/model_info.json", "r", encoding="utf-8") as f:
            models_info = json.load(f)
        for model_name, info in models_info.items():
            if not info['enable']:
                continue
            model_title = info['title']
            model_author = info.get("author", None)
            model_cover = f"weights/{category_folder}/{model_name}/{info['cover']}"
            model_index = f"weights/{category_folder}/{model_name}/{info['feature_retrieval_library']}"
            cpt = torch.load(f"weights/{category_folder}/{model_name}/{model_name}.pth", map_location="cpu")
            tgt_sr = cpt["config"][-1]
            cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
            if_f0 = cpt.get("f0", 1)
            if if_f0 == 1:
                net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
            else:
                net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
            del net_g.enc_q  # the posterior encoder is only needed for training
            print(net_g.load_state_dict(cpt["weight"], strict=False))
            net_g.eval().to(config.device)
            if config.is_half:
                net_g = net_g.half()
            else:
                net_g = net_g.float()
            vc = VC(tgt_sr, config)
            print(f"Model loaded: {model_name}")
            models.append((model_name, model_title, model_author, model_cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, model_index)))
        categories.append([category_title, category_folder, description, models])
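
    # Build the UI: one tab per category, one nested tab per model; each model
    # tab is wired to the vc_fn closure created for it above.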
    with gr.Blocks() as app:
        gr.Markdown(
            "# <center> RVC Models\n"
            "## <center> The input audio should be a clean, pure voice without background music.\n"
            "### <center> This project was inspired by [zomehwh](https://huggingface.co/spaces/zomehwh/rvc-models) and [ardha27](https://huggingface.co/spaces/ardha27/rvc-models)\n"
            "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n"
            "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
        )
        for (folder_title, folder, description, models) in categories:
            with gr.TabItem(folder_title):
                if description:
                    gr.Markdown(f"<center>{description}")
                with gr.Tabs():
                    if not models:
                        gr.Markdown("# <center> No Model Loaded.")
                        gr.Markdown("## <center> Please add a model or fix your model path.")
                        continue
                    for (name, title, author, cover, vc_fn) in models:
                        with gr.TabItem(name):
                            with gr.Row():
                                gr.Markdown(
                                    '<div align="center">'
                                    f'<div>{title}</div>\n' +
                                    (f'<div>Model author: {author}</div>' if author else "") +
                                    (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "") +
                                    '</div>'
                                )
                            with gr.Row():
                                with gr.Column():
                                    # Parenthesize the conditional so the base label survives when no length limit applies.
                                    vc_input = gr.Audio(label="Input audio" + (" (less than 20 seconds)" if limitation else ""))
                                    vc_transpose = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change from female to male voice.')
                                    vc_f0method = gr.Radio(
                                        label="Pitch extraction algorithm",
                                        choices=["pm", "harvest"],
                                        value="pm",
                                        interactive=True,
                                        info="PM is fast but Harvest is better for low frequencies. (Default: PM)"
                                    )
                                    vc_index_ratio = gr.Slider(
                                        minimum=0,
                                        maximum=1,
                                        label="Retrieval feature ratio",
                                        value=0.6,
                                        interactive=True,
                                        info="(Default: 0.6)"
                                    )
                                    tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
                                    tts_text = gr.Textbox(visible=False, label="TTS text (100 characters limit)" if limitation else "TTS text")
                                    tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
                                    vc_submit = gr.Button("Generate", variant="primary")
                                with gr.Column():
                                    vc_output = gr.Audio(label="Output Audio")
                            vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output])
                            tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
    app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.colab)