kevinwang676 committed
Commit b5bca4e
1 Parent(s): 3734e3a

Update app.py

Files changed (1)
  1. app.py +1 -532
app.py CHANGED
@@ -1,534 +1,3 @@
- import spaces
  import os
- import glob
- import json
- import traceback
- import logging
- import gradio as gr
- import numpy as np
- import librosa
- import torch
- import asyncio
- import ffmpeg
- import subprocess
- import sys
- import io
- import wave
- from datetime import datetime
- import urllib.request
- import zipfile
- import shutil
- import gradio as gr
- from textwrap import dedent
- import pprint
- import time

- import re
- import requests
- import subprocess
- from pathlib import Path
- from scipy.io.wavfile import write
- from scipy.io import wavfile
- import soundfile as sf
-
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from vc_infer_pipeline import VC
- from config import Config
- config = Config()
- logging.getLogger("numba").setLevel(logging.WARNING)
- spaces_hf = True #os.getenv("SYSTEM") == "spaces"
- force_support = True
-
- audio_mode = []
- f0method_mode = []
- f0method_info = ""
-
- headers = {
-     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
- }
- pattern = r'//www\.bilibili\.com/video[^"]*'
-
- # Download models
-
- #urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/hubert_base", "hubert_base.pt")
- #urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/rmvpe", "rmvpe.pt")
-
- # Get zip name
-
- pattern_zip = r"/([^/]+)\.zip$"
-
- def get_file_name(url):
-     match = re.search(pattern_zip, url)
-     if match:
-         extracted_string = match.group(1)
-         return extracted_string
-     else:
-         raise Exception("没有找到AI歌手模型的zip压缩包。")
-
- # Get RVC models
-
- def extract_zip(extraction_folder, zip_name):
-     os.makedirs(extraction_folder)
-     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
-         zip_ref.extractall(extraction_folder)
-     os.remove(zip_name)
-
-     index_filepath, model_filepath = None, None
-     for root, dirs, files in os.walk(extraction_folder):
-         for name in files:
-             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
-                 index_filepath = os.path.join(root, name)
-
-             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
-                 model_filepath = os.path.join(root, name)
-
-     if not model_filepath:
-         raise Exception(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
-
-     # move model and index file to extraction folder
-     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
-     if index_filepath:
-         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
-
-     # remove any unnecessary nested folders
-     for filepath in os.listdir(extraction_folder):
-         if os.path.isdir(os.path.join(extraction_folder, filepath)):
-             shutil.rmtree(os.path.join(extraction_folder, filepath))
-
- # Get username in OpenXLab
-
- def get_username(url):
-     match_username = re.search(r'models/(.*?)/', url)
-     if match_username:
-         result = match_username.group(1)
-         return result
-
- def download_online_model(url, dir_name):
-     if url.startswith('https://download.openxlab.org.cn/models/'):
-         zip_path = get_username(url) + "-" + get_file_name(url)
-     else:
-         zip_path = get_file_name(url)
-     if not os.path.exists(zip_path):
-         print("P.S. AI歌手模型还未下载")
-         try:
-             zip_name = url.split('/')[-1]
-             extraction_folder = os.path.join(zip_path, dir_name)
-             if os.path.exists(extraction_folder):
-                 raise Exception(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
-             if 'pixeldrain.com' in url:
-                 url = f'https://pixeldrain.com/api/file/{zip_name}'
-
-             urllib.request.urlretrieve(url, zip_name)
-
-             extract_zip(extraction_folder, zip_name)
-             #return f'[√] {dir_name} Model successfully downloaded!'
-
-         except Exception as e:
-             raise Exception(str(e))
-     else:
-         print("P.S. AI歌手模型之前已经下载")
-
- #Get bilibili BV id
-
- def get_bilibili_video_id(url):
-     match = re.search(r'/video/([a-zA-Z0-9]+)/', url)
-     extracted_value = match.group(1)
-     return extracted_value
-
- # Get bilibili audio
- def find_first_appearance_with_neighborhood(text, pattern):
-     match = re.search(pattern, text)
-
-     if match:
-         return match.group()
-     else:
-         return None
-
- def search_bilibili(keyword):
-     if keyword.startswith("BV"):
-         req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1".format(keyword), headers=headers).text
-     else:
-         req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1&tids=3&page=1".format(keyword), headers=headers).text
-
-     video_link = "https:" + find_first_appearance_with_neighborhood(req, pattern)
-
-     return video_link
-
- # Save bilibili audio
-
- def get_response(html_url):
-     headers = {
-         "referer": "https://www.bilibili.com/",
-         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
-     }
-     response = requests.get(html_url, headers=headers)
-     return response
-
- def get_video_info(html_url):
-     response = get_response(html_url)
-     html_data = re.findall('<script>window.__playinfo__=(.*?)</script>', response.text)[0]
-     json_data = json.loads(html_data)
-     if json_data['data']['dash']['audio'][0]['backupUrl']!=None:
-         audio_url = json_data['data']['dash']['audio'][0]['backupUrl'][0]
-     else:
-         audio_url = json_data['data']['dash']['audio'][0]['baseUrl']
-     return audio_url
-
- def save_audio(title, audio_url):
-     audio_content = get_response(audio_url).content
-     with open(title + '.wav', mode='wb') as f:
-         f.write(audio_content)
-     print("音乐内容保存完成")
-
-
- # Use UVR-HP5/2
-
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP2.pth", "uvr5/uvr_model/UVR-HP2.pth")
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP5.pth", "uvr5/uvr_model/UVR-HP5.pth")
- #urllib.request.urlretrieve("https://huggingface.co/fastrolling/uvr/resolve/main/Main_Models/5_HP-Karaoke-UVR.pth", "uvr5/uvr_model/UVR-HP5.pth")
-
- from uvr5.vr import AudioPre
- weight_uvr5_root = "uvr5/uvr_model"
- uvr5_names = []
- for name in os.listdir(weight_uvr5_root):
-     if name.endswith(".pth") or "onnx" in name:
-         uvr5_names.append(name.replace(".pth", ""))
-
- func = AudioPre
- pre_fun_hp2 = func(
-     agg=int(10),
-     model_path=os.path.join(weight_uvr5_root, "UVR-HP2.pth"),
-     device="cuda",
-     is_half=True,
- )
-
- pre_fun_hp5 = func(
-     agg=int(10),
-     model_path=os.path.join(weight_uvr5_root, "UVR-HP5.pth"),
-     device="cuda",
-     is_half=True,
- )
-
- # Separate vocals
-
- def youtube_downloader(
-     filename,
-     split_model,
- ):
-
-     audio_path = filename.strip() + ".wav"
-
-     # make dir output
-     os.makedirs("output", exist_ok=True)
-
-     if split_model=="UVR-HP2":
-         pre_fun = pre_fun_hp2
-     else:
-         pre_fun = pre_fun_hp5
-
-     pre_fun._path_audio_(audio_path, f"./output/{split_model}/{filename}/", f"./output/{split_model}/{filename}/", "wav")
-     os.remove(filename.strip()+".wav")
-
-     return f"./output/{split_model}/{filename}/vocal_{filename}.wav_10.wav", f"./output/{split_model}/{filename}/instrument_{filename}.wav_10.wav"
-
- # Original code
-
- if force_support is False or spaces_hf is True:
-     if spaces_hf is True:
-         audio_mode = ["Upload audio", "TTS Audio"]
-     else:
-         audio_mode = ["Input path", "Upload audio", "TTS Audio"]
-     f0method_mode = ["pm", "harvest"]
-     f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better). (Default: PM)"
- else:
-     audio_mode = ["Input path", "Upload audio", "Youtube", "TTS Audio"]
-     f0method_mode = ["pm", "harvest", "crepe"]
-     f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better), and Crepe effect is good but requires GPU (Default: PM)"
-
- if os.path.isfile("rmvpe.pt"):
-     f0method_mode.insert(2, "rmvpe")
-
- def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index):
-     def vc_fn(
-         vc_audio_mode,
-         vc_input,
-         vc_upload,
-         tts_text,
-         tts_voice,
-         f0_up_key,
-         f0_method,
-         index_rate,
-         filter_radius,
-         resample_sr,
-         rms_mix_rate,
-         protect,
-     ):
-         try:
-             logs = []
-             print(f"Converting using {model_name}...")
-             logs.append(f"Converting using {model_name}...")
-             yield "\n".join(logs), None
-             if vc_audio_mode == "Input path" or "Youtube" and vc_input != "":
-                 audio, sr = librosa.load(vc_input, sr=16000, mono=True)
-             elif vc_audio_mode == "Upload audio":
-                 if vc_upload is None:
-                     return "You need to upload an audio", None
-                 sampling_rate, audio = vc_upload
-                 duration = audio.shape[0] / sampling_rate
-                 if duration > 20 and spaces_hf:
-                     return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
-                 audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                 if len(audio.shape) > 1:
-                     audio = librosa.to_mono(audio.transpose(1, 0))
-                 if sampling_rate != 16000:
-                     audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-             times = [0, 0, 0]
-             f0_up_key = int(f0_up_key)
-             audio_opt = vc.pipeline(
-                 hubert_model,
-                 net_g,
-                 0,
-                 audio,
-                 vc_input,
-                 times,
-                 f0_up_key,
-                 f0_method,
-                 file_index,
-                 # file_big_npy,
-                 index_rate,
-                 if_f0,
-                 filter_radius,
-                 tgt_sr,
-                 resample_sr,
-                 rms_mix_rate,
-                 version,
-                 protect,
-                 f0_file=None,
-             )
-             info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-             print(f"{model_name} | {info}")
-             logs.append(f"Successfully Convert {model_name}\n{info}")
-             yield "\n".join(logs), (tgt_sr, audio_opt)
-         except Exception as err:
-             info = traceback.format_exc()
-             print(info)
-             print(f"Error when using {model_name}.\n{str(err)}")
-             yield info, None
-     return vc_fn
-
- def combine_vocal_and_inst(model_name, song_name, song_id, split_model, cover_song, vocal_volume, inst_volume):
-     #samplerate, data = wavfile.read(cover_song)
-     vocal_path = cover_song #f"output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav"
-     output_path = song_name.strip() + "-AI-" + ''.join(os.listdir(f"{model_name}")).strip() + "翻唱版.mp3"
-     inst_path = f"output/{split_model}/{song_id}/instrument_{song_id}.wav_10.wav"
-     #with wave.open(vocal_path, "w") as wave_file:
-         #wave_file.setnchannels(1)
-         #wave_file.setsampwidth(2)
-         #wave_file.setframerate(samplerate)
-         #wave_file.writeframes(data.tobytes())
-     command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [0:a]volume={inst_volume}[i];[1:a]volume={vocal_volume}[v];[i][v]amix=inputs=2:duration=longest[a] -map [a] -b:a 320k -c:a libmp3lame {output_path}'
-     result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-     print(result.stdout.decode())
-     return output_path
-
- def rvc_models(model_name):
-     global vc, net_g, index_files, tgt_sr, version
-     categories = []
-     models = []
-     for w_root, w_dirs, _ in os.walk(f"{model_name}"):
-         model_count = 1
-         for sub_dir in w_dirs:
-             pth_files = glob.glob(f"{model_name}/{sub_dir}/*.pth")
-             index_files = glob.glob(f"{model_name}/{sub_dir}/*.index")
-             if pth_files == []:
-                 print(f"Model [{model_count}/{len(w_dirs)}]: No Model file detected, skipping...")
-                 continue
-             cpt = torch.load(pth_files[0])
-             tgt_sr = cpt["config"][-1]
-             cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
-             if_f0 = cpt.get("f0", 1)
-             version = cpt.get("version", "v1")
-             if version == "v1":
-                 if if_f0 == 1:
-                     net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
-                 else:
-                     net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-                 model_version = "V1"
-             elif version == "v2":
-                 if if_f0 == 1:
-                     net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
-                 else:
-                     net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-                 model_version = "V2"
-             del net_g.enc_q
-             print(net_g.load_state_dict(cpt["weight"], strict=False))
-             net_g.eval().to(config.device)
-             if config.is_half:
-                 net_g = net_g.half()
-             else:
-                 net_g = net_g.float()
-             vc = VC(tgt_sr, config)
-             if index_files == []:
-                 print("Warning: No Index file detected!")
-                 index_info = "None"
-                 model_index = ""
-             else:
-                 index_info = index_files[0]
-                 model_index = index_files[0]
-             print(f"Model loaded [{model_count}/{len(w_dirs)}]: {index_files[0]} / {index_info} | ({model_version})")
-             model_count += 1
-             models.append((index_files[0][:-4], index_files[0][:-4], "", "", model_version, create_vc_fn(index_files[0], tgt_sr, net_g, vc, if_f0, version, model_index)))
-     categories.append(["Models", "", models])
-     return vc, net_g, index_files, tgt_sr, version
-
- singers="您的专属AI歌手阵容:"
-
- @spaces.GPU(duration=120)
- def rvc_infer_music_gpu(zip_path, song_name, song_id, split_model, f0_up_key, vocal_volume, inst_volume):
-     print("3.开始加载HuBert模型...")
-     from fairseq import checkpoint_utils
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(config.device)
-     if config.is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-     print("3.开始加载AI歌手模型参数...")
-     rvc_models(zip_path)
-     if os.path.isdir(f"./output/{split_model}/{song_id}")==True:
-         print("4.直接开始推理(BGM之前已经去除)...")
-         audio, sr = librosa.load(f"./output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav", sr=16000, mono=True)
-         song_infer = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             "",
-             [0, 0, 0],
-             f0_up_key,
-             "rmvpe",
-             index_files[0],
-             0.7,
-             1,
-             3,
-             tgt_sr,
-             0,
-             0.25,
-             version,
-             0.33,
-             f0_file=None,
-         )
-     else:
-         print("4.1.开始去除BGM...")
-         audio, sr = librosa.load(youtube_downloader(song_id, split_model)[0], sr=16000, mono=True)
-         print("4.1.开始推理...")
-         song_infer = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             "",
-             [0, 0, 0],
-             f0_up_key,
-             "rmvpe",
-             index_files[0],
-             0.7,
-             1,
-             3,
-             tgt_sr,
-             0,
-             0.25,
-             version,
-             0.33,
-             f0_file=None,
-         )
-     sf.write(song_name.strip()+zip_path+"AI翻唱.wav", song_infer, tgt_sr)
-     output_full_song = combine_vocal_and_inst(zip_path, song_name.strip(), song_id, split_model, song_name.strip()+zip_path+"AI翻唱.wav", vocal_volume, inst_volume)
-     os.remove(song_name.strip()+zip_path+"AI翻唱.wav")
-     return output_full_song
-
- def rvc_infer_music(url, model_name, song_name, split_model, f0_up_key, vocal_volume, inst_volume):
-     url = url.strip().replace(" ", "")
-     model_name = model_name.strip().replace(" ", "")
-     if url.startswith('https://download.openxlab.org.cn/models/'):
-         zip_path = get_username(url) + "-" + get_file_name(url)
-     else:
-         zip_path = get_file_name(url)
-     global singers
-     if model_name not in singers:
-         singers = singers+ ' '+ model_name
-     print("1.开始下载AI歌手模型...")
-     download_online_model(url, model_name)
-     song_name = song_name.strip().replace(" ", "")
-     video_identifier = search_bilibili(song_name)
-     song_id = get_bilibili_video_id(video_identifier)
-     print(video_identifier)
-     video_info = get_video_info(video_identifier)
-     print(video_info)
-     audio_content = get_response(video_info).content
-     print("2.开始下载AI翻唱歌曲...")
-     with open(song_id.strip() + ".wav", mode="wb") as f:
-         f.write(audio_content)
-     output_full_song = rvc_infer_music_gpu(zip_path, song_name, song_id, split_model, f0_up_key, vocal_volume, inst_volume)
-     return output_full_song, singers
-
- app = gr.Blocks(theme="JohnSmith9982/small_and_pretty")
- with app:
-     gr.Markdown("# <center>🌊💕🎶 滔滔AI,您的专属AI全明星乐团</center>")
-     gr.Markdown("## <center>🌟 只需一个歌曲名,全网AI歌手任您选择!随时随地,听我想听!</center>")
-     gr.Markdown("### <center>🤗 更多精彩应用,敬请关注[滔滔AI](http://www.talktalkai.com);相关问题欢迎在我们的[B站](https://space.bilibili.com/501495851)账号交流!滔滔AI,为爱滔滔!💕</center>")
-     with gr.Accordion("💡 一些AI歌手模型链接及使用说明(建议阅读)", open=False):
-         _ = f""" 任何能够在线下载的zip压缩包的链接都可以哦(zip压缩包只需包括AI歌手模型的.pth和.index文件,zip压缩包的链接需要以.zip作为后缀):
-             * Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
-             * Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
-             * AI派蒙: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
-             * AI孙燕姿: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
-             * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P)(推荐使用[OpenXLab](https://openxlab.org.cn/models)存放模型zip压缩包): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
-             说明1:点击“一键开启AI翻唱之旅吧!”按钮即可使用!✨\n
-             说明2:一般情况下,男声演唱的歌曲转换成AI女声演唱需要升调,反之则需要降调;在“歌曲人声升降调”模块可以调整\n
-             说明3:对于同一个AI歌手模型或者同一首歌曲,第一次的运行时间会比较长(大约1分钟),请您耐心等待;之后的运行时间会大大缩短哦!\n
-             说明4:您之前下载过的模型会在“已下载的AI歌手全明星阵容”模块出现\n
-             说明5:此程序使用 [RVC](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) AI歌手模型,感谢[作者](https://space.bilibili.com/5760446)的开源!RVC模型训练教程参见[视频](https://www.bilibili.com/video/BV1mX4y1C7w4)\n
-             🤗 我们正在创建一个完全开源、共建共享的AI歌手模型社区,让更多的人感受到AI音乐的乐趣与魅力!请关注我们的[B站](https://space.bilibili.com/501495851)账号,了解社区的最新进展!合作联系:talktalkai.kevin@gmail.com
-             """
-         gr.Markdown(dedent(_))
-
-     with gr.Row():
-         with gr.Column():
-             inp1 = gr.Textbox(label="请输入AI歌手模型链接", info="模型需要是含有.pth和.index文件的zip压缩包", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
-         with gr.Column():
-             inp2 = gr.Textbox(label="请给您的AI歌手起一个昵称吧", info="可自定义名称,但名称中不能有特殊符号", lines=1, value="AI Taylor", placeholder="AI Taylor")
-             inp3 = gr.Textbox(label="请输入您需要AI翻唱的歌曲名", info="如果您对搜索结果不满意,可在歌曲名后加上“无损”或“歌手的名字”等关键词;歌曲名中不能有特殊符号", lines=1, value="小幸运", placeholder="小幸运")
-     with gr.Row():
-         inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
-         inp5 = gr.Slider(label="歌曲人声升降调", info="默认为0,+2为升高2个key,以此类推", minimum=-12, maximum=12, value=0, step=1)
-         inp6 = gr.Slider(label="歌曲人声音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
-         inp7 = gr.Slider(label="歌曲伴奏音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
-     btn = gr.Button("一键开启AI翻唱之旅吧!💕", variant="primary")
-     with gr.Row():
-         output_song = gr.Audio(label="AI歌手为您倾情演绎")
-         singer_list = gr.Textbox(label="已下载的AI歌手全明星阵容")
-
-     btn.click(fn=rvc_infer_music, inputs=[inp1, inp2, inp3, inp4, inp5, inp6, inp7], outputs=[output_song, singer_list])
-
-     gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。请自觉合规使用此程序,程序开发者不负有任何责任。</center>")
-     gr.HTML('''
-         <div class="footer">
-             <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
-             </p>
-         </div>
-     ''')
-
- app.queue(max_size=40, api_open=False)
- app.launch(max_threads=400, show_error=True)
+ exec(os.environ.get('CODE'))