EllieKini committed on
Commit d0ad590 · 1 Parent(s): 8c3b29b

Upload 11 files

README (2).md ADDED
@@ -0,0 +1,3 @@
1
+ ---
2
+ license: mit
3
+ ---
README.md CHANGED
@@ -1,19 +1,9 @@
 
1
  ---
2
  pipeline_tag: audio-to-audio
3
  tags:
4
  - rvc
5
- - legal
6
- license: openrail
7
- datasets:
8
- - Open-Orca/OpenOrca
9
- - Salesforce/dialogstudio
10
- language:
11
- - en
12
- - ja
13
- - ko
14
- metrics:
15
- - transformersegmentation/segmentation_scores
16
- library_name: fairseq
17
  ---
18
  # AiHoshinoTTS
19
 
@@ -31,4 +21,5 @@ Model Type: RVC
31
 
32
  Source: https://huggingface.co/juuxn/RVCModels/
33
 
34
- Reason: Converting into loadable format for https://github.com/chavinlo/rvc-runpod
 
 
1
+
2
  ---
3
  pipeline_tag: audio-to-audio
4
  tags:
5
  - rvc
6
+ - sail-rvc
7
  ---
8
  # AiHoshinoTTS
9
 
 
21
 
22
  Source: https://huggingface.co/juuxn/RVCModels/
23
 
24
+ Reason: Converting into loadable format for https://github.com/chavinlo/rvc-runpod
25
+
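For reference, a minimal sketch of how a checkpoint in this converted format can be loaded, mirroring the get_vc helper in infer-web.py further below; the checkpoint filename is a hypothetical example and the infer_pack package (part of the RVC codebase, not this upload) is assumed to be importable:

import torch
from infer_pack.models import SynthesizerTrnMs256NSF as SynthesizerTrn256

# Load a converted RVC checkpoint (same fields that get_vc in infer-web.py reads).
cpt = torch.load("weights/AiHoshinoTTS.pt", map_location="cpu")  # hypothetical path
dv = cpt["dv"]                 # speaker embedding index
tgt_sr = cpt["config"][-1]     # target sample rate is stored as the last config entry
net_g = SynthesizerTrn256(*cpt["config"], is_half=False)
net_g.load_state_dict(cpt["weight"], strict=True)
net_g.eval()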
gitattributes.txt ADDED
@@ -0,0 +1,36 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ ffmpeg.exe filter=lfs diff=lfs merge=lfs -text
36
+ ffprobe.exe filter=lfs diff=lfs merge=lfs -text
hubert_base.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
3
+ size 189507909
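hubert_base.pt is stored as a Git LFS pointer, so the real ~190 MB weights only exist locally after running git lfs pull. A quick sanity check, using the expected size taken from the pointer above, might look like:

import os

EXPECTED_SIZE = 189507909  # "size" field of the LFS pointer above

actual = os.path.getsize("hubert_base.pt")
if actual != EXPECTED_SIZE:
    raise RuntimeError(
        f"hubert_base.pt is {actual} bytes, expected {EXPECTED_SIZE}; "
        "run git lfs pull to fetch the actual model weights."
    )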
infer-web.py ADDED
@@ -0,0 +1,193 @@
1
+ import torch, pdb, os,traceback,sys,warnings,shutil
2
+ now_dir=os.getcwd()
3
+ sys.path.append(now_dir)
4
+ tmp=os.path.join(now_dir,"TEMP")
5
+ shutil.rmtree(tmp,ignore_errors=True)
6
+ os.makedirs(tmp,exist_ok=True)
7
+ os.environ["TEMP"]=tmp
8
+ warnings.filterwarnings("ignore")
9
+ torch.manual_seed(114514)
10
+ from infer_pack.models import SynthesizerTrnMs256NSF as SynthesizerTrn256
11
+ from scipy.io import wavfile
12
+ from fairseq import checkpoint_utils
13
+ import gradio as gr
14
+ import librosa
15
+ import logging
16
+ from vc_infer_pipeline import VC
17
+ import soundfile as sf
18
+ from config import is_half,device
19
+ from infer_uvr5 import _audio_pre_
20
+ logging.getLogger('numba').setLevel(logging.WARNING)
21
+
22
+ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"],suffix="",)
23
+ hubert_model = models[0]
24
+ hubert_model = hubert_model.to(device)
25
+ if(is_half):hubert_model = hubert_model.half()
26
+ else:hubert_model = hubert_model.float()
27
+ hubert_model.eval()
28
+
29
+
30
+ weight_root="weights"
31
+ weight_uvr5_root="uvr5_weights"
32
+ names=[]
33
+ for name in os.listdir(weight_root):names.append(name.replace(".pt",""))
34
+ uvr5_names=[]
35
+ for name in os.listdir(weight_uvr5_root):uvr5_names.append(name.replace(".pth",""))
36
+
37
+ def get_vc(sid):
38
+ person = "%s/%s.pt" % (weight_root, sid)
39
+ cpt = torch.load(person, map_location="cpu")
40
+ dv = cpt["dv"]
41
+ tgt_sr = cpt["config"][-1]
42
+ net_g = SynthesizerTrn256(*cpt["config"], is_half=is_half)
43
+ net_g.load_state_dict(cpt["weight"], strict=True)
44
+ net_g.eval().to(device)
45
+ if (is_half):net_g = net_g.half()
46
+ else:net_g = net_g.float()
47
+ vc = VC(tgt_sr, device, is_half)
48
+ return dv,tgt_sr,net_g,vc
49
+
50
+ def vc_single(sid,input_audio,f0_up_key,f0_file):
51
+ if input_audio is None:return "You need to upload an audio", None
52
+ f0_up_key = int(f0_up_key)
53
+ try:
54
+ if(type(input_audio)==str):
55
+ print("processing %s" % input_audio)
56
+ audio, sampling_rate = sf.read(input_audio)
57
+ else:
58
+ sampling_rate, audio = input_audio
59
+ audio = audio.astype("float32") / 32768
60
+ if(type(sid)==str):dv, tgt_sr, net_g, vc=get_vc(sid)
61
+ else:dv,tgt_sr,net_g,vc=sid
62
+ if len(audio.shape) > 1:
63
+ audio = librosa.to_mono(audio.transpose(1, 0))
64
+ if sampling_rate != 16000:
65
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
66
+ times = [0, 0, 0]
67
+ audio_opt=vc.pipeline(hubert_model,net_g,dv,audio,times,f0_up_key,f0_file=f0_file)
68
+ print(times)
69
+ return "Success", (tgt_sr, audio_opt)
70
+ except:
71
+ info=traceback.format_exc()
72
+ print(info)
73
+ return info,(None,None)
74
+ finally:
75
+ print("clean_empty_cache")
76
+ del net_g,dv,vc
77
+ torch.cuda.empty_cache()
78
+
79
+ def vc_multi(sid,dir_path,opt_root,paths,f0_up_key):
80
+ try:
81
+ dir_path=dir_path.strip(" ")  # strip stray spaces in case the path was copied with leading/trailing whitespace
82
+ opt_root=opt_root.strip(" ")
83
+ os.makedirs(opt_root, exist_ok=True)
84
+ dv, tgt_sr, net_g, vc = get_vc(sid)
85
+ try:
86
+ if(dir_path!=""):paths=[os.path.join(dir_path,name)for name in os.listdir(dir_path)]
87
+ else:paths=[path.name for path in paths]
88
+ except:
89
+ traceback.print_exc()
90
+ paths = [path.name for path in paths]
91
+ infos=[]
92
+ for path in paths:
93
+ info,opt=vc_single([dv,tgt_sr,net_g,vc],path,f0_up_key,f0_file=None)
94
+ if(info=="Success"):
95
+ try:
96
+ tgt_sr,audio_opt=opt
97
+ wavfile.write("%s/%s" % (opt_root, os.path.basename(path)), tgt_sr, audio_opt)
98
+ except:
99
+ info=traceback.format_exc()
100
+ infos.append("%s->%s"%(os.path.basename(path),info))
101
+ return "\n".join(infos)
102
+ except:
103
+ return traceback.format_exc()
104
+ finally:
105
+ print("clean_empty_cache")
106
+ del net_g,dv,vc
107
+ torch.cuda.empty_cache()
108
+
109
+ def uvr(model_name,inp_root,save_root_vocal,save_root_ins):
110
+ infos = []
111
+ try:
112
+ inp_root = inp_root.strip(" ")  # strip stray spaces in case the path was copied with leading/trailing whitespace
113
+ save_root_vocal = save_root_vocal.strip(" ")
114
+ save_root_ins = save_root_ins.strip(" ")
115
+ pre_fun = _audio_pre_(model_path=os.path.join(weight_uvr5_root,model_name+".pth"), device=device, is_half=is_half)
116
+ for name in os.listdir(inp_root):
117
+ inp_path=os.path.join(inp_root,name)
118
+ try:
119
+ pre_fun._path_audio_(inp_path , save_root_ins,save_root_vocal)
120
+ infos.append("%s->Success"%(os.path.basename(inp_path)))
121
+ except:
122
+ infos.append("%s->%s" % (os.path.basename(inp_path),traceback.format_exc()))
123
+ except:
124
+ infos.append(traceback.format_exc())
125
+ finally:
126
+ try:
127
+ del pre_fun.model
128
+ del pre_fun
129
+ except:
130
+ traceback.print_exc()
131
+ print("clean_empty_cache")
132
+ torch.cuda.empty_cache()
133
+ return "\n".join(infos)
134
+
135
+ with gr.Blocks() as app:
136
+ with gr.Tabs():
137
+ with gr.TabItem("Inference"):
138
+ with gr.Group():
139
+ gr.Markdown(value="""
140
+ Users of this software, and anyone who distributes audio exported with it, do so entirely at their own risk. If you do not accept these terms, you may not use or reference any code or files in this package.<br>
141
+ Only the 白菜 voice is available for now; this will later grow into a local training and inference tool so users can train their own voices and share them with the community.<br>
142
+ For male-to-female conversion +12 semitones is recommended, and -12 for female-to-male; if the range blows out and the timbre distorts, adjust the key to a comfortable range yourself.
143
+ """)
144
+ with gr.Row():
145
+ with gr.Column():
146
+ sid0 = gr.Dropdown(label="Voice", choices=names)
147
+ vc_transform0 = gr.Number(label="Transpose (integer, semitones; +12 = up one octave, -12 = down one octave)", value=12)
148
+ f0_file = gr.File(label="Optional F0 curve file, one pitch per line; overrides the default F0 and transpose")
149
+ input_audio0 = gr.Audio(label="Upload audio")
150
+ but0=gr.Button("Convert", variant="primary")
151
+ with gr.Column():
152
+ vc_output1 = gr.Textbox(label="Output log")
153
+ vc_output2 = gr.Audio(label="Output audio")
154
+ but0.click(vc_single, [sid0, input_audio0, vc_transform0,f0_file], [vc_output1, vc_output2])
155
+ with gr.Group():
156
+ gr.Markdown(value="""
157
+ Batch conversion: upload multiple audio files, and the converted audio is written to the specified folder (default "opt").<br>
158
+ Example of a valid folder path: E:\codes\py39\\vits_vc_gpu\白鹭霜华测试样例 (just copy it from the file manager's address bar)
159
+ """)
160
+ with gr.Row():
161
+ with gr.Column():
162
+ sid1 = gr.Dropdown(label="Voice", choices=names)
163
+ vc_transform1 = gr.Number(label="Transpose (integer, semitones; +12 = up one octave, -12 = down one octave)", value=12)
164
+ opt_input = gr.Textbox(label="Output folder",value="opt")
165
+ with gr.Column():
166
+ dir_input = gr.Textbox(label="Path of the folder of audio files to process")
167
+ inputs = gr.File(file_count="multiple", label="Or upload audio files in batch; if both are given, the folder takes priority")
168
+ but1=gr.Button("Convert", variant="primary")
169
+ vc_output3 = gr.Textbox(label="Output log")
170
+ but1.click(vc_multi, [sid1, dir_input,opt_input,inputs, vc_transform1], [vc_output3])
171
+
172
+ with gr.TabItem("Data processing"):
173
+ with gr.Group():
174
+ gr.Markdown(value="""
175
+ Batch vocal/accompaniment separation using the UVR5 models.<br>
176
+ Use HP2 for tracks without harmonies; use HP5 when there are harmonies and the extracted vocals should not keep them.<br>
177
+ Example of a valid folder path: E:\codes\py39\\vits_vc_gpu\白鹭霜华测试样例 (just copy it from the file manager's address bar)
178
+ """)
179
+ with gr.Row():
180
+ with gr.Column():
181
+ dir_wav_input = gr.Textbox(label="Path of the folder of audio files to process")
182
+ wav_inputs = gr.File(file_count="multiple", label="Or upload audio files in batch; if both are given, the folder takes priority")
183
+ with gr.Column():
184
+ model_choose = gr.Dropdown(label="Model", choices=uvr5_names)
185
+ opt_vocal_root = gr.Textbox(label="Vocals output folder",value="opt")
186
+ opt_ins_root = gr.Textbox(label="Instrumental output folder",value="opt")
187
+ but2=gr.Button("Convert", variant="primary")
188
+ vc_output4 = gr.Textbox(label="Output log")
189
+ but2.click(uvr, [model_choose, dir_wav_input,opt_vocal_root,opt_ins_root], [vc_output4])
190
+ with gr.TabItem("Training (coming soon)"):pass
191
+
192
+ # app.launch(server_name="0.0.0.0",server_port=7860)
193
+ app.launch(server_name="127.0.0.1",server_port=7860)
infer.py ADDED
@@ -0,0 +1,48 @@
1
+ import torch, pdb, os,sys,librosa,warnings,traceback
2
+ warnings.filterwarnings("ignore")
3
+ torch.manual_seed(114514)
4
+ sys.path.append(os.getcwd())
5
+ from config import inp_root,opt_root,f0_up_key,person,is_half,device
6
+ os.makedirs(opt_root,exist_ok=True)
7
+ import soundfile as sf
8
+ from infer_pack.models import SynthesizerTrnMs256NSF as SynthesizerTrn256
9
+ from scipy.io import wavfile
10
+ from fairseq import checkpoint_utils
11
+ import scipy.signal as signal
12
+ from vc_infer_pipeline import VC
13
+
14
+ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"],suffix="",)
15
+ model = models[0]
16
+ model = model.to(device)
17
+ if(is_half):model = model.half()
18
+ else:model = model.float()
19
+ model.eval()
20
+
21
+ cpt=torch.load(person,map_location="cpu")
22
+ dv=cpt["dv"]
23
+ tgt_sr=cpt["config"][-1]
24
+ net_g = SynthesizerTrn256(*cpt["config"],is_half=is_half)
25
+ net_g.load_state_dict(cpt["weight"],strict=True)
26
+ net_g.eval().to(device)
27
+ if(is_half):net_g = net_g.half()
28
+ else:net_g = net_g.float()
29
+
30
+ vc=VC(tgt_sr,device,is_half)
31
+
32
+ for name in os.listdir(inp_root):
33
+ try:
34
+ wav_path="%s/%s"%(inp_root,name)  # forward slash so the path also works outside Windows
35
+ print("processing %s"%wav_path)
36
+ audio, sampling_rate = sf.read(wav_path)
37
+ if len(audio.shape) > 1:
38
+ audio = librosa.to_mono(audio.transpose(1, 0))
39
+ if sampling_rate != vc.sr:
40
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=vc.sr)
41
+
42
+ times = [0, 0, 0]
43
+ audio_opt=vc.pipeline(model,net_g,dv,audio,times,f0_up_key,f0_file=None)
44
+ wavfile.write("%s/%s"%(opt_root,name), tgt_sr, audio_opt)
45
+ except:
46
+ traceback.print_exc()
47
+
48
+ print(times)
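Both infer.py and infer-web.py import a config module that is not part of this upload. A minimal sketch of the fields they expect is shown here; the names come from the imports above, but every value is purely an illustrative assumption, not the author's settings:

# config.py -- illustrative sketch only; all values below are assumptions.
import torch

device = "cuda:0" if torch.cuda.is_available() else "cpu"
is_half = device != "cpu"  # fp16 inference only makes sense on GPU

# Batch settings used by infer.py
inp_root = "input_wavs"                 # folder of source audio
opt_root = "opt"                        # output folder
f0_up_key = 0                           # pitch shift in semitones
person = "weights/AiHoshinoTTS.pt"      # hypothetical checkpoint path

# Segmentation constants used by vc_infer_pipeline.py (in seconds)
x_pad, x_query, x_center, x_max = 1, 6, 38, 41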
infer_uvr5.py ADDED
@@ -0,0 +1,108 @@
1
+ import os,sys,torch,warnings,pdb
2
+ warnings.filterwarnings("ignore")
3
+ import librosa
4
+ import importlib
5
+ import numpy as np
6
+ import hashlib , math
7
+ from tqdm import tqdm
8
+ from uvr5_pack.lib_v5 import spec_utils
9
+ from uvr5_pack.utils import _get_name_params,inference
10
+ from uvr5_pack.lib_v5.model_param_init import ModelParameters
11
+ from scipy.io import wavfile
12
+
13
+ class _audio_pre_():
14
+ def __init__(self, model_path,device,is_half):
15
+ self.model_path = model_path
16
+ self.device = device
17
+ self.data = {
18
+ # Processing Options
19
+ 'postprocess': False,
20
+ 'tta': False,
21
+ # Constants
22
+ 'window_size': 512,
23
+ 'agg': 10,
24
+ 'high_end_process': 'mirroring',
25
+ }
26
+ nn_arch_sizes = [
27
+ 31191, # default
28
+ 33966,61968, 123821, 123812, 537238 # custom
29
+ ]
30
+ self.nn_architecture = list('{}KB'.format(s) for s in nn_arch_sizes)
31
+ model_size = math.ceil(os.stat(model_path ).st_size / 1024)
32
+ nn_architecture = '{}KB'.format(min(nn_arch_sizes, key=lambda x:abs(x-model_size)))
33
+ nets = importlib.import_module('uvr5_pack.lib_v5.nets' + f'_{nn_architecture}'.replace('_{}KB'.format(nn_arch_sizes[0]), ''), package=None)
34
+ model_hash = hashlib.md5(open(model_path,'rb').read()).hexdigest()
35
+ param_name ,model_params_d = _get_name_params(model_path , model_hash)
36
+
37
+ mp = ModelParameters(model_params_d)
38
+ model = nets.CascadedASPPNet(mp.param['bins'] * 2)
39
+ cpk = torch.load( model_path , map_location='cpu')
40
+ model.load_state_dict(cpk)
41
+ model.eval()
42
+ if(is_half==True):model = model.half().to(device)
43
+ else:model = model.to(device)
44
+
45
+ self.mp = mp
46
+ self.model = model
47
+
48
+ def _path_audio_(self, music_file ,ins_root=None,vocal_root=None):
49
+ if(ins_root is None and vocal_root is None):return "No save root."
50
+ name=os.path.basename(music_file)
51
+ if(ins_root is not None):os.makedirs(ins_root, exist_ok=True)
52
+ if(vocal_root is not None):os.makedirs(vocal_root , exist_ok=True)
53
+ X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
54
+ bands_n = len(self.mp.param['band'])
55
+ # print(bands_n)
56
+ for d in range(bands_n, 0, -1):
57
+ bp = self.mp.param['band'][d]
58
+ if d == bands_n: # high-end band
59
+ X_wave[d], _ = librosa.core.load(
60
+ music_file, bp['sr'], False, dtype=np.float32, res_type=bp['res_type'])
61
+ if X_wave[d].ndim == 1:
62
+ X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
63
+ else: # lower bands
64
+ X_wave[d] = librosa.core.resample(X_wave[d+1], self.mp.param['band'][d+1]['sr'], bp['sr'], res_type=bp['res_type'])
65
+ # Stft of wave source
66
+ X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(X_wave[d], bp['hl'], bp['n_fft'], self.mp.param['mid_side'], self.mp.param['mid_side_b2'], self.mp.param['reverse'])
67
+ # pdb.set_trace()
68
+ if d == bands_n and self.data['high_end_process'] != 'none':
69
+ input_high_end_h = (bp['n_fft']//2 - bp['crop_stop']) + ( self.mp.param['pre_filter_stop'] - self.mp.param['pre_filter_start'])
70
+ input_high_end = X_spec_s[d][:, bp['n_fft']//2-input_high_end_h:bp['n_fft']//2, :]
71
+
72
+ X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
73
+ aggressive_set = float(self.data['agg']/100)
74
+ aggressiveness = {'value': aggressive_set, 'split_bin': self.mp.param['band'][1]['crop_stop']}
75
+ with torch.no_grad():
76
+ pred, X_mag, X_phase = inference(X_spec_m,self.device,self.model, aggressiveness,self.data)
77
+ # Postprocess
78
+ if self.data['postprocess']:
79
+ pred_inv = np.clip(X_mag - pred, 0, np.inf)
80
+ pred = spec_utils.mask_silence(pred, pred_inv)
81
+ y_spec_m = pred * X_phase
82
+ v_spec_m = X_spec_m - y_spec_m
83
+
84
+ if (ins_root is not None):
85
+ if self.data['high_end_process'].startswith('mirroring'):
86
+ input_high_end_ = spec_utils.mirroring(self.data['high_end_process'], y_spec_m, input_high_end, self.mp)
87
+ wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp,input_high_end_h, input_high_end_)
88
+ else:
89
+ wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
90
+ print ('%s instruments done'%name)
91
+ wavfile.write(os.path.join(ins_root, 'instrument_{}.wav'.format(name) ), self.mp.param['sr'], (np.array(wav_instrument)*32768).astype("int16")) #
92
+ if (vocal_root is not None):
93
+ if self.data['high_end_process'].startswith('mirroring'):
94
+ input_high_end_ = spec_utils.mirroring(self.data['high_end_process'], v_spec_m, input_high_end, self.mp)
95
+ wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp, input_high_end_h, input_high_end_)
96
+ else:
97
+ wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
98
+ print ('%s vocals done'%name)
99
+ wavfile.write(os.path.join(vocal_root , 'vocal_{}.wav'.format(name) ), self.mp.param['sr'], (np.array(wav_vocals)*32768).astype("int16"))
100
+
101
+ if __name__ == '__main__':
102
+ device = 'cuda'
103
+ is_half=True
104
+ model_path='uvr5_weights/2_HP-UVR.pth'
105
+ pre_fun = _audio_pre_(model_path=model_path,device=device,is_half=True)
106
+ audio_path = '神女劈观.aac'
107
+ save_path = 'opt'
108
+ pre_fun._path_audio_(audio_path , save_path,save_path)
mute.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee948e85213e4ed2f2ba2f8dfcee810bfd0b63131d91450e920bbe1cbd0321d0
3
+ size 312273
vc_infer_pipeline.py ADDED
@@ -0,0 +1,225 @@
1
+ import numpy as np,parselmouth,torch,pdb,traceback  # traceback is used when parsing the optional F0 file
2
+ from time import time as ttime
3
+ import torch.nn.functional as F
4
+ from config import x_pad,x_query,x_center,x_max
5
+ from sklearn.cluster import KMeans
6
+
7
+ def resize2d(x, target_len,is1):
8
+ minn=1 if is1==True else 0
9
+ ss = np.array(x).astype("float32")
10
+ ss[ss <=minn] = np.nan
11
+ target = np.interp(np.arange(0, len(ss) * target_len, len(ss)) / target_len, np.arange(0, len(ss)), ss)
12
+ res = np.nan_to_num(target)
13
+ return res
14
+
15
+ class VC(object):
16
+ def __init__(self,tgt_sr,device,is_half):
17
+ self.sr=16000  # hubert input sample rate
18
+ self.window=160  # samples per frame
19
+ self.t_pad=self.sr*x_pad  # padding added before and after each segment
20
+ self.t_pad_tgt=tgt_sr*x_pad
21
+ self.t_pad2=self.t_pad*2
22
+ self.t_query=self.sr*x_query  # search window around each candidate cut point
23
+ self.t_center=self.sr*x_center  # spacing between candidate cut points
24
+ self.t_max=self.sr*x_max  # length threshold below which no cutting is done
25
+ self.device=device
26
+ self.is_half=is_half
27
+
28
+ def get_f0(self,x, p_len,f0_up_key=0,inp_f0=None):
29
+ time_step = self.window / self.sr * 1000
30
+ f0_min = 50
31
+ f0_max = 1100
32
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
33
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
34
+ f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
35
+ time_step=time_step / 1000, voicing_threshold=0.6,
36
+ pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
37
+ pad_size=(p_len - len(f0) + 1) // 2
38
+ if(pad_size>0 or p_len - len(f0) - pad_size>0):
39
+ f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
40
+ f0 *= pow(2, f0_up_key / 12)
41
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
42
+ tf0=self.sr//self.window  # number of f0 points per second
43
+ if (inp_f0 is not None):
44
+ delta_t=np.round((inp_f0[:,0].max()-inp_f0[:,0].min())*tf0+1).astype("int16")
45
+ replace_f0=np.interp(list(range(delta_t)), inp_f0[:, 0]*100, inp_f0[:, 1])
46
+ shape=f0[x_pad*tf0:x_pad*tf0+len(replace_f0)].shape[0]
47
+ f0[x_pad*tf0:x_pad*tf0+len(replace_f0)]=replace_f0[:shape]
48
+ # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
49
+ f0bak = f0.copy()
50
+ f0_mel = 1127 * np.log(1 + f0 / 700)
51
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
52
+ f0_mel[f0_mel <= 1] = 1
53
+ f0_mel[f0_mel > 255] = 255
54
+ f0_coarse = np.rint(f0_mel).astype(int)
55
+ return f0_coarse, f0bak#1-0
56
+
57
+ def vc(self,model,net_g,dv,audio0,pitch,pitchf,times):
58
+ feats = torch.from_numpy(audio0)
59
+ if(self.is_half==True):feats=feats.half()
60
+ else:feats=feats.float()
61
+ if feats.dim() == 2: # double channels
62
+ feats = feats.mean(-1)
63
+ assert feats.dim() == 1, feats.dim()
64
+ feats = feats.view(1, -1)
65
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
66
+
67
+ inputs = {
68
+ "source": feats.to(self.device),
69
+ "padding_mask": padding_mask.to(self.device),
70
+ "output_layer": 9, # layer 9
71
+ }
72
+ t0 = ttime()
73
+ with torch.no_grad():
74
+ logits = model.extract_features(**inputs)
75
+ feats = model.final_proj(logits[0])
76
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
77
+ t1 = ttime()
78
+ p_len = audio0.shape[0]//self.window
79
+ if(feats.shape[1]<p_len):
80
+ p_len=feats.shape[1]
81
+ pitch=pitch[:,:p_len]
82
+ pitchf=pitchf[:,:p_len]
83
+ p_len=torch.LongTensor([p_len]).to(self.device)
84
+ with torch.no_grad():
85
+ audio1 = (net_g.infer(feats, p_len, pitch, pitchf, dv)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
86
+ del feats,p_len,padding_mask
87
+ torch.cuda.empty_cache()
88
+ t2 = ttime()
89
+ times[0] += (t1 - t0)
90
+ times[2] += (t2 - t1)
91
+ return audio1
92
+ def vc_km(self,model,net_g,dv,audio0,pitch,pitchf,times):
93
+ kmeans = KMeans(500)
94
+ def get_cluster_result(x):
95
+ """x: np.array [t, 256]"""
96
+ return kmeans.predict(x)
97
+ checkpoint = torch.load("lulu_contentvec_kmeans_500.pt")
98
+ kmeans.__dict__["n_features_in_"] = checkpoint["n_features_in_"]
99
+ kmeans.__dict__["_n_threads"] = checkpoint["_n_threads"]
100
+ kmeans.__dict__["cluster_centers_"] = checkpoint["cluster_centers_"]
101
+ feats = torch.from_numpy(audio0).float()
102
+ if feats.dim() == 2: # double channels
103
+ feats = feats.mean(-1)
104
+ assert feats.dim() == 1, feats.dim()
105
+ feats = feats.view(1, -1)
106
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
107
+ inputs = {
108
+ "source": feats.half().to(self.device),
109
+ "padding_mask": padding_mask.to(self.device),
110
+ "output_layer": 9, # layer 9
111
+ }
112
+ torch.cuda.synchronize()
113
+ t0 = ttime()
114
+ with torch.no_grad():
115
+ logits = model.extract_features(**inputs)
116
+ feats = model.final_proj(logits[0])
117
+ feats = get_cluster_result(feats.cpu().numpy()[0].astype("float32"))
118
+ feats = torch.from_numpy(feats).to(self.device)
119
+ feats = F.interpolate(feats.half().unsqueeze(0).unsqueeze(0), scale_factor=2).long().squeeze(0)
120
+ t1 = ttime()
121
+ p_len = audio0.shape[0]//self.window
122
+ if(feats.shape[1]<p_len):
123
+ p_len=feats.shape[1]
124
+ pitch=pitch[:,:p_len]
125
+ pitchf=pitchf[:,:p_len]
126
+ p_len=torch.LongTensor([p_len]).to(self.device)
127
+ with torch.no_grad():
128
+ audio1 = (net_g.infer(feats, p_len, pitch, pitchf, dv)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
129
+ del feats,p_len,padding_mask
130
+ torch.cuda.empty_cache()
131
+ t2 = ttime()
132
+ times[0] += (t1 - t0)
133
+ times[2] += (t2 - t1)
134
+ return audio1
135
+
136
+ def pipeline(self,model,net_g,dv,audio,times,f0_up_key,f0_file=None):
137
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode='reflect')
138
+ opt_ts = []
139
+ if(audio_pad.shape[0]>self.t_max):
140
+ audio_sum = np.zeros_like(audio)
141
+ for i in range(self.window): audio_sum += audio_pad[i:i - self.window]
142
+ for t in range(self.t_center, audio.shape[0],self.t_center):opt_ts.append(t - self.t_query + np.where(np.abs(audio_sum[t - self.t_query:t + self.t_query]) == np.abs(audio_sum[t - self.t_query:t + self.t_query]).min())[0][0])
143
+ s = 0
144
+ audio_opt=[]
145
+ t=None
146
+ t1=ttime()
147
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode='reflect')
148
+ p_len=audio_pad.shape[0]//self.window
149
+ inp_f0=None
150
+ if(hasattr(f0_file,'name') ==True):
151
+ try:
152
+ with open(f0_file.name,"r")as f:
153
+ lines=f.read().strip("\n").split("\n")
154
+ inp_f0=[]
155
+ for line in lines:inp_f0.append([float(i)for i in line.split(",")])
156
+ inp_f0=np.array(inp_f0,dtype="float32")
157
+ except:
158
+ traceback.print_exc()
159
+ pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key,inp_f0)
160
+
161
+ pitch = pitch[:p_len]
162
+ pitchf = pitchf[:p_len]
163
+ # if(inp_f0 is None):
164
+ # pitch = pitch[:p_len]
165
+ # pitchf = pitchf[:p_len]
166
+ # else:
167
+ # pitch=resize2d(pitch,p_len,is1=True)
168
+ # pitchf=resize2d(pitchf,p_len,is1=False)
169
+ pitch = torch.LongTensor(pitch).unsqueeze(0).to(self.device)
170
+ pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(self.device)
171
+ t2=ttime()
172
+ times[1] += (t2 - t1)
173
+ for t in opt_ts:
174
+ t=t//self.window*self.window
175
+ audio_opt.append(self.vc(model,net_g,dv,audio_pad[s:t+self.t_pad2+self.window],pitch[:,s//self.window:(t+self.t_pad2)//self.window],pitchf[:,s//self.window:(t+self.t_pad2)//self.window],times)[self.t_pad_tgt:-self.t_pad_tgt])
176
+ s = t
177
+ audio_opt.append(self.vc(model,net_g,dv,audio_pad[t:],pitch[:,t//self.window:]if t is not None else pitch,pitchf[:,t//self.window:]if t is not None else pitchf,times)[self.t_pad_tgt:-self.t_pad_tgt])
178
+ audio_opt=np.concatenate(audio_opt)
179
+ del pitch,pitchf
180
+ return audio_opt
181
+ def pipeline_km(self,model,net_g,dv,audio,times,f0_up_key,f0_file=None):
182
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode='reflect')
183
+ opt_ts = []
184
+ if(audio_pad.shape[0]>self.t_max):
185
+ audio_sum = np.zeros_like(audio)
186
+ for i in range(self.window): audio_sum += audio_pad[i:i - self.window]
187
+ for t in range(self.t_center, audio.shape[0],self.t_center):opt_ts.append(t - self.t_query + np.where(np.abs(audio_sum[t - self.t_query:t + self.t_query]) == np.abs(audio_sum[t - self.t_query:t + self.t_query]).min())[0][0])
188
+ s = 0
189
+ audio_opt=[]
190
+ t=None
191
+ t1=ttime()
192
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode='reflect')
193
+ p_len=audio_pad.shape[0]//self.window
194
+ inp_f0=None
195
+ if(hasattr(f0_file,'name') ==True):
196
+ try:
197
+ with open(f0_file.name,"r")as f:
198
+ lines=f.read().strip("\n").split("\n")
199
+ inp_f0=[]
200
+ for line in lines:inp_f0.append([float(i)for i in line.split(",")])
201
+ inp_f0=np.array(inp_f0,dtype="float32")
202
+ except:
203
+ traceback.print_exc()
204
+ pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key,inp_f0)
205
+
206
+ pitch = pitch[:p_len]
207
+ pitchf = pitchf[:p_len]
208
+ # if(inp_f0 is None):
209
+ # pitch = pitch[:p_len]
210
+ # pitchf = pitchf[:p_len]
211
+ # else:
212
+ # pitch=resize2d(pitch,p_len,is1=True)
213
+ # pitchf=resize2d(pitchf,p_len,is1=False)
214
+ pitch = torch.LongTensor(pitch).unsqueeze(0).to(self.device)
215
+ pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(self.device)
216
+ t2=ttime()
217
+ times[1] += (t2 - t1)
218
+ for t in opt_ts:
219
+ t=t//self.window*self.window
220
+ audio_opt.append(self.vc_km(model,net_g,dv,audio_pad[s:t+self.t_pad2+self.window],pitch[:,s//self.window:(t+self.t_pad2)//self.window],pitchf[:,s//self.window:(t+self.t_pad2)//self.window],times)[self.t_pad_tgt:-self.t_pad_tgt])
221
+ s = t
222
+ audio_opt.append(self.vc_km(model,net_g,dv,audio_pad[t:],pitch[:,t//self.window:]if t is not None else pitch,pitchf[:,t//self.window:]if t is not None else pitchf,times)[self.t_pad_tgt:-self.t_pad_tgt])
223
+ audio_opt=np.concatenate(audio_opt)
224
+ del pitch,pitchf
225
+ return audio_opt
使用需遵守的协议-LICENSE.txt ADDED
@@ -0,0 +1,54 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2022 lj1995
4
+
5
+ This software is for research use only. Users of the software and anyone who distributes audio exported with it bear full responsibility. If you do not accept these terms, you may not use or reference any code or files in this package.
6
+
7
+ Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ of this software and associated documentation files (the "Software"), to deal
9
+ in the Software without restriction, including without limitation the rights
10
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ copies of the Software, and to permit persons to whom the Software is
12
+ furnished to do so, subject to the following conditions:
13
+
14
+ The above copyright notice and this permission notice shall be included in all
15
+ copies or substantial portions of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ SOFTWARE.
24
+ #################
25
+ ContentVec
26
+ https://github.com/auspicious3000/contentvec/blob/main/LICENSE
27
+ MIT License
28
+ #################
29
+ VITS
30
+ https://github.com/jaywalnut310/vits/blob/main/LICENSE
31
+ MIT License
32
+ #################
33
+ HIFIGAN
34
+ https://github.com/jik876/hifi-gan/blob/master/LICENSE
35
+ MIT License
36
+ #################
37
+ gradio
38
+ https://github.com/gradio-app/gradio/blob/main/LICENSE
39
+ Apache License 2.0
40
+ #################
41
+ ffmpeg
42
+ https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
43
+ https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip
44
+ LGPLv3 License
45
+ MIT License
46
+ #################
47
+ ultimatevocalremovergui
48
+ https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE
49
+ https://github.com/yang123qwe/vocal_separation_by_uvr5
50
+ MIT License
51
+ #################
52
+ audio-slicer
53
+ https://github.com/openvpi/audio-slicer/blob/main/LICENSE
54
+ MIT License