lj1995 committed on
Commit
82273dd
1 Parent(s): 782ca29

Upload myinfer.py

Browse files
Files changed (1) hide show
  1. myinfer.py +101 -24
myinfer.py CHANGED
@@ -1,12 +1,102 @@
 
 
 
1
  import os,sys,pdb,torch
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  f0up_key=sys.argv[1]
3
  input_path=sys.argv[2]
4
  index_path=sys.argv[3]
5
- npy_path=sys.argv[4]
6
  opt_path=sys.argv[5]
7
  model_path=sys.argv[6]
 
 
 
8
  print(sys.argv)
9
- sys.argv=['myinfer.py']
10
  now_dir=os.getcwd()
11
  sys.path.append(now_dir)
12
  from vc_infer_pipeline import VC
@@ -15,19 +105,7 @@ from my_utils import load_audio
15
  from fairseq import checkpoint_utils
16
  from scipy.io import wavfile
17
 
18
-
19
- # f0up_key=0
20
- # input_path=r"E:\codes\py39\RVC-beta\todo-songs\1111.wav"
21
- # index_path=r"E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index"
22
- # npy_path =r"E:\codes\py39\logs\mi-test\total_fea.npy"
23
- # opt_path ="test.wav"
24
- # model_path="mi-test.pth"
25
-
26
-
27
-
28
  hubert_model=None
29
- is_half=False
30
- device="cuda"
31
  def load_hubert():
32
  global hubert_model
33
  models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"],suffix="",)
@@ -37,8 +115,7 @@ def load_hubert():
37
  else:hubert_model = hubert_model.float()
38
  hubert_model.eval()
39
 
40
-
41
- def vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,file_big_npy,index_rate):#spk_item, input_audio0, vc_transform0,f0_file,f0method0
42
  global tgt_sr,net_g,vc,hubert_model
43
  if input_audio is None:return "You need to upload an audio", None
44
  f0_up_key = int(f0_up_key)
@@ -46,16 +123,16 @@ def vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,file_big_np
46
  times = [0, 0, 0]
47
  if(hubert_model==None):load_hubert()
48
  if_f0 = cpt.get("f0", 1)
49
- audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
 
50
  print(times)
51
  return audio_opt
52
 
53
 
54
- def get_vc(sid):
55
- global n_spk,tgt_sr,net_g,vc,cpt
56
- person = "weights/%s" % (sid)
57
- print("loading %s"%person)
58
- cpt = torch.load(person, map_location="cpu")
59
  tgt_sr = cpt["config"][-1]
60
  cpt["config"][-3]=cpt["weight"]["emb_g.weight"].shape[0]#n_spk
61
  if_f0=cpt.get("f0",1)
@@ -68,12 +145,12 @@ def get_vc(sid):
68
  net_g.eval().to(device)
69
  if (is_half):net_g = net_g.half()
70
  else:net_g = net_g.float()
71
- vc = VC(tgt_sr, device, is_half)
72
  n_spk=cpt["config"][-3]
73
  # return {"visible": True,"maximum": n_spk, "__type__": "update"}
74
 
75
 
76
  get_vc(model_path)
77
- wav_opt=vc_single(0,input_path,f0up_key,None,"harvest",index_path,npy_path,0.6)
78
  wavfile.write(opt_path, tgt_sr, wav_opt)
79
 
1
+ '''
2
+ runtime\python.exe myinfer.py 0 "E:\codes\py39\RVC-beta\todo-songs\1111.wav" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "test.wav" "weights/mi-test.pth" 0.6 cuda:0 True
3
+ '''
4
  import os,sys,pdb,torch
5
+ now_dir = os.getcwd()
6
+ sys.path.append(now_dir)
7
+ import argparse
8
+ import glob
9
+ import sys
10
+ import torch
11
+ from multiprocessing import cpu_count
12
class Config:
    """Runtime configuration for inference.

    Picks the effective compute device and precision, then derives the
    slicing-window sizes (x_pad, x_query, x_center, x_max) that the
    vc_infer_pipeline uses to chunk audio according to available GPU memory.
    """

    def __init__(self, device, is_half):
        # device: requested device string, e.g. "cuda:0", "cpu", "mps".
        # is_half: whether to run inference in fp16 (may be overridden below).
        self.device = device
        self.is_half = is_half
        self.n_cpu = 0          # 0 means "autodetect" (filled in device_config)
        self.gpu_name = None
        self.gpu_mem = None     # total GPU memory in GiB (rounded), None off-GPU
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def device_config(self) -> tuple:
        """Probe the requested device, adjust device/is_half if unsupported,
        and return the (x_pad, x_query, x_center, x_max) window sizes."""
        if self.device.startswith("cuda") and torch.cuda.is_available():
            # Bugfix: the original unconditionally did
            # int(self.device.split(":")[-1]) whenever CUDA was available,
            # which raises ValueError for "cpu"/"mps"/plain "cuda". Only parse
            # an index when one was requested; plain "cuda" defaults to 0.
            i_device = int(self.device.split(":")[-1]) if ":" in self.device else 0
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                # 16xx/10xx-series cards and the P40 misbehave in fp16:
                # force single precision (message kept verbatim).
                print("16系/10系显卡和P40强制单精度")
                self.is_half = False
                # NOTE(review): constructing Config rewrites project files on
                # disk as a side effect — kept for compatibility, but callers
                # should be aware this mutates configs/ and the preprocess script.
                for config_file in ["32k.json", "40k.json", "48k.json"]:
                    with open(f"configs/{config_file}", "r") as f:
                        strr = f.read().replace("true", "false")
                    with open(f"configs/{config_file}", "w") as f:
                        f.write(strr)
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
            else:
                self.gpu_name = None
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                # Very small GPUs: shrink the preprocess chunk length too.
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
        elif torch.backends.mps.is_available():
            print("没有发现支持的N卡, 使用MPS进行推理")
            self.device = "mps"
        else:
            print("没有发现支持的N卡, 使用CPU进行推理")
            self.device = "cpu"
            # Bugfix: the original set is_half = True here, but most fp16
            # kernels are unimplemented on CPU, so half precision would fail
            # or crawl. Disable it for CPU inference.
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # 6 GB VRAM preset
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # 5 GB VRAM preset
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem != None and self.gpu_mem <= 4:
            # <=4 GB GPUs: shrink the windows further regardless of precision.
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
88
+
89
# ---- command-line arguments ----
# usage: myinfer.py <f0up_key> <input_path> <index_path> <f0method> <opt_path>
#                   <model_path> <index_rate> <device> <is_half>
f0up_key=sys.argv[1]           # pitch transpose in semitones
input_path=sys.argv[2]         # source audio file
index_path=sys.argv[3]         # faiss .index file
f0method=sys.argv[4]#harvest or pm
opt_path=sys.argv[5]           # output wav path
model_path=sys.argv[6]         # model weights (.pth) path
index_rate=float(sys.argv[7])  # 0..1 blend ratio for index retrieval
device=sys.argv[8]             # "cuda:0" / "cpu" / "mps"
# Bugfix: the original used bool(sys.argv[9]), but bool() of any non-empty
# string is True — passing "False" on the command line still enabled half
# precision. Parse the flag explicitly instead.
is_half=sys.argv[9].strip().lower() in ("1", "true", "yes", "y")
print(sys.argv)
config=Config(device,is_half)
now_dir=os.getcwd()
sys.path.append(now_dir)
102
  from vc_infer_pipeline import VC
105
  from fairseq import checkpoint_utils
106
  from scipy.io import wavfile
107
 
 
 
 
 
 
 
 
 
 
 
108
  hubert_model=None
 
 
109
  def load_hubert():
110
  global hubert_model
111
  models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"],suffix="",)
115
  else:hubert_model = hubert_model.float()
116
  hubert_model.eval()
117
 
118
+ def vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,index_rate):
 
119
  global tgt_sr,net_g,vc,hubert_model
120
  if input_audio is None:return "You need to upload an audio", None
121
  f0_up_key = int(f0_up_key)
123
  times = [0, 0, 0]
124
  if(hubert_model==None):load_hubert()
125
  if_f0 = cpt.get("f0", 1)
126
+ # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
127
+ audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,index_rate,if_f0,f0_file=f0_file)
128
  print(times)
129
  return audio_opt
130
 
131
 
132
+ def get_vc(model_path):
133
+ global n_spk,tgt_sr,net_g,vc,cpt,device,is_half
134
+ print("loading pth %s"%model_path)
135
+ cpt = torch.load(model_path, map_location="cpu")
 
136
  tgt_sr = cpt["config"][-1]
137
  cpt["config"][-3]=cpt["weight"]["emb_g.weight"].shape[0]#n_spk
138
  if_f0=cpt.get("f0",1)
145
  net_g.eval().to(device)
146
  if (is_half):net_g = net_g.half()
147
  else:net_g = net_g.float()
148
+ vc = VC(tgt_sr, config)
149
  n_spk=cpt["config"][-3]
150
  # return {"visible": True,"maximum": n_spk, "__type__": "update"}
151
 
152
 
153
  get_vc(model_path)
154
+ wav_opt=vc_single(0,input_path,f0up_key,None,f0method,index_path,index_rate)
155
  wavfile.write(opt_path, tgt_sr, wav_opt)
156