kevinwang676 committed on
Commit ebe0ac1
1 Parent(s): 0439d1e

Update app.py

Files changed (1)
  1. app.py +1 -573
app.py CHANGED
@@ -1,574 +1,2 @@
- import spaces
-
  import os
- #os.system("pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu117")
- import glob
- import json
- import traceback
- import logging
- import gradio as gr
- import numpy as np
- import librosa
- import torch
- import asyncio
- import ffmpeg
- import subprocess
- import sys
- import io
- import wave
- from datetime import datetime
- #from fairseq import checkpoint_utils
- import urllib.request
- import zipfile
- import shutil
- import gradio as gr
- from textwrap import dedent
- import pprint
- import time
-
- import re
- import requests
- import subprocess
- from pathlib import Path
- from scipy.io.wavfile import write
- from scipy.io import wavfile
- import soundfile as sf
-
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from vc_infer_pipeline import VC
- from config import Config
- config = Config()
- logging.getLogger("numba").setLevel(logging.WARNING)
- spaces_hf = True #os.getenv("SYSTEM") == "spaces"
- force_support = True
-
- audio_mode = []
- f0method_mode = []
- f0method_info = ""
-
- headers = {
-     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
- }
- pattern = r'//www\.bilibili\.com/video[^"]*'
-
- # Download models
-
- #urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/hubert_base", "hubert_base.pt")
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/rmvpe", "rmvpe.pt")
-
- # Get zip name
-
- pattern_zip = r"/([^/]+)\.zip$"
-
- def get_file_name(url):
-     match = re.search(pattern_zip, url)
-     if match:
-         extracted_string = match.group(1)
-         return extracted_string
-     else:
-         raise Exception("没有找到AI歌手模型的zip压缩包。")
-
- # Get RVC models
-
- def extract_zip(extraction_folder, zip_name):
-     os.makedirs(extraction_folder)
-     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
-         zip_ref.extractall(extraction_folder)
-     os.remove(zip_name)
-
-     index_filepath, model_filepath = None, None
-     for root, dirs, files in os.walk(extraction_folder):
-         for name in files:
-             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
-                 index_filepath = os.path.join(root, name)
-
-             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
-                 model_filepath = os.path.join(root, name)
-
-     if not model_filepath:
-         raise Exception(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
-
-     # move model and index file to extraction folder
-     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
-     if index_filepath:
-         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
-
-     # remove any unnecessary nested folders
-     for filepath in os.listdir(extraction_folder):
-         if os.path.isdir(os.path.join(extraction_folder, filepath)):
-             shutil.rmtree(os.path.join(extraction_folder, filepath))
-
- # Get username in OpenXLab
-
- def get_username(url):
-     match_username = re.search(r'models/(.*?)/', url)
-     if match_username:
-         result = match_username.group(1)
-         return result
-
- def download_online_model(url, dir_name):
-     if url.startswith('https://download.openxlab.org.cn/models/'):
-         zip_path = get_username(url) + "-" + get_file_name(url)
-     else:
-         zip_path = get_file_name(url)
-     if not os.path.exists(zip_path):
-         try:
-             zip_name = url.split('/')[-1]
-             extraction_folder = os.path.join(zip_path, dir_name)
-             if os.path.exists(extraction_folder):
-                 raise Exception(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
-             if 'pixeldrain.com' in url:
-                 url = f'https://pixeldrain.com/api/file/{zip_name}'
-
-             urllib.request.urlretrieve(url, zip_name)
-
-             extract_zip(extraction_folder, zip_name)
-             #return f'[√] {dir_name} Model successfully downloaded!'
-
-         except Exception as e:
-             raise Exception(str(e))
-
- #Get bilibili BV id
-
- def get_bilibili_video_id(url):
-     match = re.search(r'/video/([a-zA-Z0-9]+)/', url)
-     extracted_value = match.group(1)
-     return extracted_value
-
- # Get bilibili audio
- def find_first_appearance_with_neighborhood(text, pattern):
-     match = re.search(pattern, text)
-
-     if match:
-         return match.group()
-     else:
-         return None
-
- def search_bilibili(keyword):
-     if keyword.startswith("BV"):
-         req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1".format(keyword), headers=headers).text
-     else:
-         req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1&tids=3&page=1".format(keyword), headers=headers).text
-
-     video_link = "https:" + find_first_appearance_with_neighborhood(req, pattern)
-
-     return video_link
-
- # Save bilibili audio
-
- def get_response(html_url):
-     headers = {
-         "referer": "https://www.bilibili.com/",
-         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
-     }
-     response = requests.get(html_url, headers=headers)
-     return response
-
- def get_video_info(html_url):
-     response = get_response(html_url)
-     html_data = re.findall('<script>window.__playinfo__=(.*?)</script>', response.text)[0]
-     json_data = json.loads(html_data)
-     if json_data['data']['dash']['audio'][0]['backupUrl']!=None:
-         audio_url = json_data['data']['dash']['audio'][0]['backupUrl'][0]
-     else:
-         audio_url = json_data['data']['dash']['audio'][0]['baseUrl']
-     return audio_url
-
- def save_audio(title, audio_url):
-     audio_content = get_response(audio_url).content
-     with open(title + '.wav', mode='wb') as f:
-         f.write(audio_content)
-     print("音乐内容保存完成")
-
-
- # Use UVR-HP5/2
-
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP2.pth", "uvr5/uvr_model/UVR-HP2.pth")
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP5.pth", "uvr5/uvr_model/UVR-HP5.pth")
- #urllib.request.urlretrieve("https://huggingface.co/fastrolling/uvr/resolve/main/Main_Models/5_HP-Karaoke-UVR.pth", "uvr5/uvr_model/UVR-HP5.pth")
-
- from uvr5.vr import AudioPre
- weight_uvr5_root = "uvr5/uvr_model"
- uvr5_names = []
- for name in os.listdir(weight_uvr5_root):
-     if name.endswith(".pth") or "onnx" in name:
-         uvr5_names.append(name.replace(".pth", ""))
-
- func = AudioPre
- pre_fun_hp2 = func(
-     agg=int(10),
-     model_path=os.path.join(weight_uvr5_root, "UVR-HP2.pth"),
-     device="cuda",
-     is_half=True,
- )
-
- pre_fun_hp5 = func(
-     agg=int(10),
-     model_path=os.path.join(weight_uvr5_root, "UVR-HP5.pth"),
-     device="cuda",
-     is_half=True,
- )
-
- # Separate vocals
- @spaces.GPU(duration=300)
- def youtube_downloader(
-     video_identifier,
-     filename,
-     split_model,
- ):
-     print(video_identifier)
-     video_info = get_video_info(video_identifier)
-     print(video_info)
-     audio_content = get_response(video_info).content
-     with open(filename.strip() + ".wav", mode="wb") as f:
-         f.write(audio_content)
-     audio_path = filename.strip() + ".wav"
-
-     # make dir output
-     os.makedirs("output", exist_ok=True)
-
-     if split_model=="UVR-HP2":
-         pre_fun = pre_fun_hp2
-     else:
-         pre_fun = pre_fun_hp5
-
-     pre_fun._path_audio_(audio_path, f"./output/{split_model}/{filename}/", f"./output/{split_model}/{filename}/", "wav")
-     os.remove(filename.strip()+".wav")
-
-     return f"./output/{split_model}/{filename}/vocal_{filename}.wav_10.wav", f"./output/{split_model}/{filename}/instrument_{filename}.wav_10.wav"
-
- # Original code
-
- if force_support is False or spaces_hf is True:
-     if spaces_hf is True:
-         audio_mode = ["Upload audio", "TTS Audio"]
-     else:
-         audio_mode = ["Input path", "Upload audio", "TTS Audio"]
-     f0method_mode = ["pm", "harvest"]
-     f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better). (Default: PM)"
- else:
-     audio_mode = ["Input path", "Upload audio", "Youtube", "TTS Audio"]
-     f0method_mode = ["pm", "harvest", "crepe"]
-     f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better), and Crepe effect is good but requires GPU (Default: PM)"
-
- if os.path.isfile("rmvpe.pt"):
-     f0method_mode.insert(2, "rmvpe")
-
- def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index):
-     def vc_fn(
-         vc_audio_mode,
-         vc_input,
-         vc_upload,
-         tts_text,
-         tts_voice,
-         f0_up_key,
-         f0_method,
-         index_rate,
-         filter_radius,
-         resample_sr,
-         rms_mix_rate,
-         protect,
-     ):
-         try:
-             logs = []
-             print(f"Converting using {model_name}...")
-             logs.append(f"Converting using {model_name}...")
-             yield "\n".join(logs), None
-             if vc_audio_mode == "Input path" or "Youtube" and vc_input != "":
-                 audio, sr = librosa.load(vc_input, sr=16000, mono=True)
-             elif vc_audio_mode == "Upload audio":
-                 if vc_upload is None:
-                     return "You need to upload an audio", None
-                 sampling_rate, audio = vc_upload
-                 duration = audio.shape[0] / sampling_rate
-                 if duration > 20 and spaces_hf:
-                     return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
-                 audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                 if len(audio.shape) > 1:
-                     audio = librosa.to_mono(audio.transpose(1, 0))
-                 if sampling_rate != 16000:
-                     audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-             times = [0, 0, 0]
-             f0_up_key = int(f0_up_key)
-             audio_opt = vc.pipeline(
-                 hubert_model,
-                 net_g,
-                 0,
-                 audio,
-                 vc_input,
-                 times,
-                 f0_up_key,
-                 f0_method,
-                 file_index,
-                 # file_big_npy,
-                 index_rate,
-                 if_f0,
-                 filter_radius,
-                 tgt_sr,
-                 resample_sr,
-                 rms_mix_rate,
-                 version,
-                 protect,
-                 f0_file=None,
-             )
-             info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-             print(f"{model_name} | {info}")
-             logs.append(f"Successfully Convert {model_name}\n{info}")
-             yield "\n".join(logs), (tgt_sr, audio_opt)
-         except Exception as err:
-             info = traceback.format_exc()
-             print(info)
-             print(f"Error when using {model_name}.\n{str(err)}")
-             yield info, None
-     return vc_fn
-
- def combine_vocal_and_inst(model_name, song_name, song_id, split_model, cover_song, vocal_volume, inst_volume):
-     #samplerate, data = wavfile.read(cover_song)
-     vocal_path = cover_song #f"output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav"
-     output_path = song_name.strip() + "-AI-" + ''.join(os.listdir(f"{model_name}")).strip() + "翻唱版.mp3"
-     inst_path = f"output/{split_model}/{song_id}/instrument_{song_id}.wav_10.wav"
-     #with wave.open(vocal_path, "w") as wave_file:
-         #wave_file.setnchannels(1)
-         #wave_file.setsampwidth(2)
-         #wave_file.setframerate(samplerate)
-         #wave_file.writeframes(data.tobytes())
-     command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [0:a]volume={inst_volume}[i];[1:a]volume={vocal_volume}[v];[i][v]amix=inputs=2:duration=longest[a] -map [a] -b:a 320k -c:a libmp3lame {output_path}'
-     result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-     print(result.stdout.decode())
-     return output_path
-
- def load_hubert():
-     from fairseq import checkpoint_utils
-
-     global hubert_model
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(config.device)
-     if config.is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-
- def rvc_models(model_name):
-     global vc, net_g, index_files, tgt_sr, version
-     categories = []
-     models = []
-     for w_root, w_dirs, _ in os.walk(f"{model_name}"):
-         model_count = 1
-         for sub_dir in w_dirs:
-             pth_files = glob.glob(f"{model_name}/{sub_dir}/*.pth")
-             index_files = glob.glob(f"{model_name}/{sub_dir}/*.index")
-             if pth_files == []:
-                 print(f"Model [{model_count}/{len(w_dirs)}]: No Model file detected, skipping...")
-                 continue
-             cpt = torch.load(pth_files[0])
-             tgt_sr = cpt["config"][-1]
-             cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
-             if_f0 = cpt.get("f0", 1)
-             version = cpt.get("version", "v1")
-             if version == "v1":
-                 if if_f0 == 1:
-                     net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
-                 else:
-                     net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-                 model_version = "V1"
-             elif version == "v2":
-                 if if_f0 == 1:
-                     net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
-                 else:
-                     net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-                 model_version = "V2"
-             del net_g.enc_q
-             print(net_g.load_state_dict(cpt["weight"], strict=False))
-             net_g.eval().to(config.device)
-             if config.is_half:
-                 net_g = net_g.half()
-             else:
-                 net_g = net_g.float()
-             vc = VC(tgt_sr, config)
-             if index_files == []:
-                 print("Warning: No Index file detected!")
-                 index_info = "None"
-                 model_index = ""
-             else:
-                 index_info = index_files[0]
-                 model_index = index_files[0]
-             print(f"Model loaded [{model_count}/{len(w_dirs)}]: {index_files[0]} / {index_info} | ({model_version})")
-             model_count += 1
-             models.append((index_files[0][:-4], index_files[0][:-4], "", "", model_version, create_vc_fn(index_files[0], tgt_sr, net_g, vc, if_f0, version, model_index)))
-     categories.append(["Models", "", models])
-     return vc, net_g, index_files, tgt_sr, version
-
- #load_hubert()
-
- singers="您的专属AI歌手阵容:"
-
- @spaces.GPU(duration=300)
- def rvc_infer_music(url, model_name, song_name, split_model, f0_up_key, vocal_volume, inst_volume):
-     load_hubert()
-     #print(hubert_model)
-     url = url.strip().replace(" ", "")
-     model_name = model_name.strip().replace(" ", "")
-     if url.startswith('https://download.openxlab.org.cn/models/'):
-         zip_path = get_username(url) + "-" + get_file_name(url)
-     else:
-         zip_path = get_file_name(url)
-     global singers
-     if model_name not in singers:
-         singers = singers+ ' '+ model_name
-     download_online_model(url, model_name)
-     rvc_models(zip_path)
-     song_name = song_name.strip().replace(" ", "")
-     video_identifier = search_bilibili(song_name)
-     song_id = get_bilibili_video_id(video_identifier)
-
-     if os.path.isdir(f"./output/{split_model}/{song_id}")==True:
-         audio, sr = librosa.load(f"./output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav", sr=16000, mono=True)
-         song_infer = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             "",
-             [0, 0, 0],
-             f0_up_key,
-             "rmvpe",
-             index_files[0],
-             0.7,
-             1,
-             3,
-             tgt_sr,
-             0,
-             0.25,
-             version,
-             0.33,
-             f0_file=None,
-         )
-     else:
-         audio, sr = librosa.load(youtube_downloader(video_identifier, song_id, split_model)[0], sr=16000, mono=True)
-         song_infer = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             "",
-             [0, 0, 0],
-             f0_up_key,
-             "rmvpe",
-             index_files[0],
-             0.7,
-             1,
-             3,
-             tgt_sr,
-             0,
-             0.25,
-             version,
-             0.33,
-             f0_file=None,
-         )
-     sf.write(song_name.strip()+zip_path+"AI翻唱.wav", song_infer, tgt_sr)
-     output_full_song = combine_vocal_and_inst(zip_path, song_name.strip(), song_id, split_model, song_name.strip()+zip_path+"AI翻唱.wav", vocal_volume, inst_volume)
-     os.remove(song_name.strip()+zip_path+"AI翻唱.wav")
-     return output_full_song, singers
-
- app = gr.Blocks(theme="JohnSmith9982/small_and_pretty")
- with app:
-     with gr.Tab("中文版"):
-         gr.Markdown("# <center>🌊💕🎶 滔滔AI,您的专属AI全明星乐团</center>")
-         gr.Markdown("## <center>🌟 只需一个歌曲名,全网AI歌手任您选择!随时随地,听我想听!</center>")
-         gr.Markdown("### <center>🤗 更多精彩应用,敬请关注[滔滔AI](http://www.talktalkai.com);相关问题欢迎在我们的[B站](https://space.bilibili.com/501495851)账号交流!滔滔AI,为爱滔滔!💕</center>")
-         with gr.Accordion("💡 一些AI歌手模型链接及使用说明(建议阅读)", open=False):
-             _ = f""" 任何能够在线下载的zip压缩包的链接都可以哦(zip压缩包只需包括AI歌手模型的.pth和.index文件,zip压缩包的链接需要以.zip作为后缀):
-             * Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
-             * Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
-             * AI派蒙: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
-             * AI孙燕姿: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
-             * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P)(推荐使用[OpenXLab](https://openxlab.org.cn/models)存放模型zip压缩包): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
-             说明1:点击“一键开启AI翻唱之旅吧!”按钮即可使用!✨\n
-             说明2:一般情况下,男声演唱的歌曲转换成AI女声演唱需要升调,反之则需要降调;在“歌曲人声升降调”模块可以调整\n
-             说明3:对于同一个AI歌手模型或者同一首歌曲,第一次的运行时间会比较长(大约1分钟),请您耐心等待;之后的运行时间会大大缩短哦!\n
-             说明4:您之前下载过的模型会在“已下载的AI歌手全明星阵容”模块出现\n
-             说明5:此程序使用 [RVC](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) AI歌手模型,感谢[作者](https://space.bilibili.com/5760446)的开源!RVC模型训练教程参见[视频](https://www.bilibili.com/video/BV1mX4y1C7w4)\n
-             🤗 我们正在创建一个完全开源、共建共享的AI歌手模型社区,让更多的人感受到AI音乐的乐趣与魅力!请关注我们的[B站](https://space.bilibili.com/501495851)账号,了解社区的最新进展!合作联系:talktalkai.kevin@gmail.com
-             """
-             gr.Markdown(dedent(_))
-
-         with gr.Row():
-             with gr.Column():
-                 inp1 = gr.Textbox(label="请输入AI歌手模型链接", info="模型需要是含有.pth和.index文件的zip压缩包", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
-             with gr.Column():
-                 inp2 = gr.Textbox(label="请给您的AI歌手起一个昵称吧", info="可自定义名称,但名称中不能有特殊符号", lines=1, value="AI Taylor", placeholder="AI Taylor")
-                 inp3 = gr.Textbox(label="请输入您需要AI翻唱的歌曲名", info="如果您对搜索结果不满意,可在歌曲名后加上“无损”或“歌手的名字”等关键词;歌曲名中不能有特殊符号", lines=1, value="小幸运", placeholder="小幸运")
-         with gr.Row():
-             inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
-             inp5 = gr.Slider(label="歌曲人声升降调", info="默认为0,+2为升高2个key,以此类推", minimum=-12, maximum=12, value=0, step=1)
-             inp6 = gr.Slider(label="歌曲人声音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
-             inp7 = gr.Slider(label="歌曲伴奏音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
-         btn = gr.Button("一键开启AI翻唱之旅吧!💕", variant="primary")
-         with gr.Row():
-             output_song = gr.Audio(label="AI歌手为您倾情演绎")
-             singer_list = gr.Textbox(label="已下载的AI歌手全明星阵容")
-
-         btn.click(fn=rvc_infer_music, inputs=[inp1, inp2, inp3, inp4, inp5, inp6, inp7], outputs=[output_song, singer_list])
-
-         gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。请自觉合规使用此程序,程序开发者不负有任何责任。</center>")
-         gr.HTML('''
-         <div class="footer">
-             <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
-             </p>
-         </div>
-         ''')
-     with gr.Tab("EN"):
-         gr.Markdown("# <center>🌊💕🎶 TalkTalkAI - Best AI song cover generator ever</center>")
-         gr.Markdown("## <center>🌟 Provide the name of a song and our application running on A100 will handle everything else!</center>")
-         gr.Markdown("### <center>🤗 [TalkTalkAI](http://www.talktalkai.com/), let everyone enjoy a better life through human-centered AI💕</center>")
-         with gr.Accordion("💡 Some AI singers you can try", open=False):
-             _ = f""" Any Zip file that you can download online will be fine (The Zip file should contain .pth and .index files):
-             * AI Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
-             * AI Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
-             * AI Paimon: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
-             * AI Stefanie Sun: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
-             * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
-             """
-             gr.Markdown(dedent(_))
-
-         with gr.Row():
-             with gr.Column():
-                 inp1_en = gr.Textbox(label="The Zip file of an AI singer", info="The Zip file should contain .pth and .index files", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
-             with gr.Column():
-                 inp2_en = gr.Textbox(label="The name of your AI singer", lines=1, value="AI Taylor", placeholder="AI Taylor")
-                 inp3_en = gr.Textbox(label="The name of a song", lines=1, value="Hotel California Eagles", placeholder="Hotel California Eagles")
-         with gr.Row():
-             inp4_en = gr.Dropdown(label="UVR models", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
-             inp5_en = gr.Slider(label="Transpose", info="0 from man to man (or woman to woman); 12 from man to woman and -12 from woman to man.", minimum=-12, maximum=12, value=0, step=1)
-             inp6_en = gr.Slider(label="Vocal volume", info="Adjust vocal volume (Default: 1)", minimum=0, maximum=3, value=1, step=0.2)
-             inp7_en = gr.Slider(label="Instrument volume", info="Adjust instrument volume (Default: 1)", minimum=0, maximum=3, value=1, step=0.2)
-         btn_en = gr.Button("Convert💕", variant="primary")
-         with gr.Row():
-             output_song_en = gr.Audio(label="AI song cover")
-             singer_list_en = gr.Textbox(label="The AI singers you have")
-
-         btn_en.click(fn=rvc_infer_music, inputs=[inp1_en, inp2_en, inp3_en, inp4_en, inp5_en, inp6_en, inp7_en], outputs=[output_song_en, singer_list_en])
-
-
-         gr.HTML('''
-         <div class="footer">
-             <p>🤗 - Stay tuned! The best is yet to come.
-             </p>
-             <p>📧 - Contact us: talktalkai.kevin@gmail.com
-             </p>
-         </div>
-         ''')
-
- app.queue(max_size=40, api_open=False)
- app.launch(max_threads=400, show_error=True)
+ exec(os.environ.get('code'))