kevinwang676 committed
Commit 2c3577a · verified · 1 parent: e409a1a

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
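The commit message indicates this snapshot was pushed with the `huggingface_hub` client. For context, the sketch below shows the typical `upload_folder` call that produces a commit like this one; the `folder_path`, `repo_id`, and `repo_type` values are illustrative assumptions, not details recorded in this commit.

```python
# Minimal sketch of an upload_folder push (assumed values, not the exact command used here).
from huggingface_hub import HfApi

api = HfApi()  # uses the access token saved by `huggingface-cli login`
api.upload_folder(
    folder_path="./GPT-SoVITS",          # hypothetical local folder to upload
    repo_id="kevinwang676/your-space",   # hypothetical target repository id
    repo_type="space",                   # assumption: the Dockerfile/webui suggest a Space
    commit_message="Upload folder using huggingface_hub",
)
```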
Files changed (50)
  1. .dockerignore +8 -0
  2. .gitattributes +5 -0
  3. .gitignore +20 -0
  4. .ipynb_checkpoints/webui-checkpoint.py +1080 -0
  5. 1739554402.592865.pth +3 -0
  6. Docker/damo.sha256 +3 -0
  7. Docker/download.py +5 -0
  8. Docker/download.sh +11 -0
  9. Docker/links.sha256 +12 -0
  10. Docker/links.txt +34 -0
  11. Dockerfile +42 -0
  12. GPT_SoVITS/.ipynb_checkpoints/inference_webui-checkpoint.py +908 -0
  13. GPT_SoVITS/AR/__init__.py +0 -0
  14. GPT_SoVITS/AR/data/__init__.py +0 -0
  15. GPT_SoVITS/AR/data/bucket_sampler.py +163 -0
  16. GPT_SoVITS/AR/data/data_module.py +76 -0
  17. GPT_SoVITS/AR/data/dataset.py +323 -0
  18. GPT_SoVITS/AR/models/__init__.py +0 -0
  19. GPT_SoVITS/AR/models/t2s_lightning_module.py +141 -0
  20. GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py +107 -0
  21. GPT_SoVITS/AR/models/t2s_model.py +876 -0
  22. GPT_SoVITS/AR/models/t2s_model_onnx.py +338 -0
  23. GPT_SoVITS/AR/models/utils.py +229 -0
  24. GPT_SoVITS/AR/modules/__init__.py +0 -0
  25. GPT_SoVITS/AR/modules/activation.py +428 -0
  26. GPT_SoVITS/AR/modules/activation_onnx.py +178 -0
  27. GPT_SoVITS/AR/modules/embedding.py +81 -0
  28. GPT_SoVITS/AR/modules/embedding_onnx.py +63 -0
  29. GPT_SoVITS/AR/modules/lr_schedulers.py +83 -0
  30. GPT_SoVITS/AR/modules/optim.py +622 -0
  31. GPT_SoVITS/AR/modules/patched_mha_with_cache.py +465 -0
  32. GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py +92 -0
  33. GPT_SoVITS/AR/modules/scaling.py +335 -0
  34. GPT_SoVITS/AR/modules/transformer.py +378 -0
  35. GPT_SoVITS/AR/modules/transformer_onnx.py +292 -0
  36. GPT_SoVITS/AR/text_processing/__init__.py +0 -0
  37. GPT_SoVITS/AR/text_processing/phonemizer.py +79 -0
  38. GPT_SoVITS/AR/text_processing/symbols.py +10 -0
  39. GPT_SoVITS/AR/utils/__init__.py +37 -0
  40. GPT_SoVITS/AR/utils/initialize.py +38 -0
  41. GPT_SoVITS/AR/utils/io.py +34 -0
  42. GPT_SoVITS/BigVGAN/LICENSE +21 -0
  43. GPT_SoVITS/BigVGAN/README.md +266 -0
  44. GPT_SoVITS/BigVGAN/activations.py +126 -0
  45. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py +0 -0
  46. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py +77 -0
  47. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
  48. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu +246 -0
  49. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/build/_ +1 -0
  50. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h +29 -0
.dockerignore ADDED
@@ -0,0 +1,8 @@
+ docs
+ logs
+ output
+ reference
+ SoVITS_weights
+ GPT_weights
+ TEMP
+ .git
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ GPT_SoVITS/text/ja_userdic/userdict.csv filter=lfs diff=lfs merge=lfs -text
+ tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/fig/struct.png filter=lfs diff=lfs merge=lfs -text
+ tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav filter=lfs diff=lfs merge=lfs -text
+ tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav filter=lfs diff=lfs merge=lfs -text
+ xinghui.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,20 @@
+ .DS_Store
+ .vscode
+ __pycache__
+ *.pyc
+ env
+ runtime
+ .idea
+ output
+ logs
+ reference
+ GPT_weights
+ SoVITS_weights
+ GPT_weights_v2
+ SoVITS_weights_v2
+ GPT_weights_v3
+ SoVITS_weights_v3
+ TEMP
+ weight.json
+ ffmpeg*
+ ffprobe*
.ipynb_checkpoints/webui-checkpoint.py ADDED
@@ -0,0 +1,1080 @@
1
+ import os,sys
2
+ if len(sys.argv)==1:sys.argv.append('v2')
3
+ version="v1"if sys.argv[1]=="v1" else"v2"
4
+ os.environ["version"]=version
5
+ now_dir = os.getcwd()
6
+ sys.path.insert(0, now_dir)
7
+ import warnings
8
+ warnings.filterwarnings("ignore")
9
+ import json,yaml,torch,pdb,re,shutil
10
+ import platform
11
+ import psutil
12
+ import signal
13
+ os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO'
14
+ torch.manual_seed(233333)
15
+ tmp = os.path.join(now_dir, "TEMP")
16
+ os.makedirs(tmp, exist_ok=True)
17
+ os.environ["TEMP"] = tmp
18
+ if(os.path.exists(tmp)):
19
+ for name in os.listdir(tmp):
20
+ if(name=="jieba.cache"):continue
21
+ path="%s/%s"%(tmp,name)
22
+ delete=os.remove if os.path.isfile(path) else shutil.rmtree
23
+ try:
24
+ delete(path)
25
+ except Exception as e:
26
+ print(str(e))
27
+ pass
28
+ import site
29
+ import traceback
30
+ site_packages_roots = []
31
+ for path in site.getsitepackages():
32
+ if "packages" in path:
33
+ site_packages_roots.append(path)
34
+ if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
35
+ #os.environ["OPENBLAS_NUM_THREADS"] = "4"
36
+ os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
37
+ os.environ["all_proxy"] = ""
38
+ for site_packages_root in site_packages_roots:
39
+ if os.path.exists(site_packages_root):
40
+ try:
41
+ with open("%s/users.pth" % (site_packages_root), "w") as f:
42
+ f.write(
43
+ # "%s\n%s/runtime\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
44
+ "%s\n%s/GPT_SoVITS/BigVGAN\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
45
+ % (now_dir, now_dir, now_dir, now_dir, now_dir, now_dir)
46
+ )
47
+ break
48
+ except PermissionError as e:
49
+ traceback.print_exc()
50
+ from tools import my_utils
51
+ import shutil
52
+ import pdb
53
+ from subprocess import Popen
54
+ import signal
55
+ from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share
56
+ from tools.i18n.i18n import I18nAuto, scan_language_list
57
+ language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto"
58
+ os.environ["language"]=language
59
+ i18n = I18nAuto(language=language)
60
+ from scipy.io import wavfile
61
+ from tools.my_utils import load_audio, check_for_existance, check_details
62
+ from multiprocessing import cpu_count
63
+ # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
64
+ try:
65
+ import gradio.analytics as analytics
66
+ analytics.version_check = lambda:None
67
+ except:...
68
+ import gradio as gr
69
+ n_cpu=cpu_count()
70
+
71
+ ngpu = torch.cuda.device_count()
72
+ gpu_infos = []
73
+ mem = []
74
+ if_gpu_ok = False
75
+
76
+ # 判断是否有能用来训练和加速推理的N卡
77
+ ok_gpu_keywords={"10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L4","4060","H","600","506","507","508","509"}
78
+ set_gpu_numbers=set()
79
+ if torch.cuda.is_available() or ngpu != 0:
80
+ for i in range(ngpu):
81
+ gpu_name = torch.cuda.get_device_name(i)
82
+ if any(value in gpu_name.upper()for value in ok_gpu_keywords):
83
+ # A10#A100#V100#A40#P40#M40#K80#A4500
84
+ if_gpu_ok = True # 至少有一张能用的N卡
85
+ gpu_infos.append("%s\t%s" % (i, gpu_name))
86
+ set_gpu_numbers.add(i)
87
+ mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
88
+ # # 判断是否支持mps加速
89
+ # if torch.backends.mps.is_available():
90
+ # if_gpu_ok = True
91
+ # gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
92
+ # mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # 实测使用系统内存作为显存不会爆显存
93
+
94
+
95
+ def set_default():
96
+ global default_batch_size,default_max_batch_size,gpu_info,default_sovits_epoch,default_sovits_save_every_epoch,max_sovits_epoch,max_sovits_save_every_epoch,default_batch_size_s1
97
+ if if_gpu_ok and len(gpu_infos) > 0:
98
+ gpu_info = "\n".join(gpu_infos)
99
+ minmem = min(mem)
100
+ default_batch_size = minmem // 2 if version!="v3"else minmem//14
101
+ default_batch_size_s1=minmem // 2
102
+ else:
103
+ gpu_info = ("%s\t%s" % ("0", "CPU"))
104
+ gpu_infos.append("%s\t%s" % ("0", "CPU"))
105
+ set_gpu_numbers.add(0)
106
+ default_batch_size = default_batch_size_s1=int(psutil.virtual_memory().total/ 1024 / 1024 / 1024 / 2)
107
+ if version!="v3":
108
+ default_sovits_epoch=8
109
+ default_sovits_save_every_epoch=4
110
+ max_sovits_epoch=25
111
+ max_sovits_save_every_epoch=25
112
+ else:
113
+ default_sovits_epoch=2
114
+ default_sovits_save_every_epoch=1
115
+ max_sovits_epoch=3
116
+ max_sovits_save_every_epoch=3
117
+ default_max_batch_size=default_batch_size*3
118
+
119
+ set_default()
120
+
121
+ gpus = "-".join([i[0] for i in gpu_infos])
122
+ default_gpu_numbers=str(sorted(list(set_gpu_numbers))[0])
123
+ def fix_gpu_number(input):#将越界的number强制改到界内
124
+ try:
125
+ if(int(input)not in set_gpu_numbers):return default_gpu_numbers
126
+ except:return input
127
+ return input
128
+ def fix_gpu_numbers(inputs):
129
+ output=[]
130
+ try:
131
+ for input in inputs.split(","):output.append(str(fix_gpu_number(input)))
132
+ return ",".join(output)
133
+ except:
134
+ return inputs
135
+
136
+ pretrained_sovits_name=["GPT_SoVITS/pretrained_models/s2G488k.pth", "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth","GPT_SoVITS/pretrained_models/s2Gv3.pth"]
137
+ pretrained_gpt_name=["GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt","GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1v3.ckpt"]
138
+
139
+ pretrained_model_list = (pretrained_sovits_name[int(version[-1])-1],pretrained_sovits_name[int(version[-1])-1].replace("s2G","s2D"),pretrained_gpt_name[int(version[-1])-1],"GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large","GPT_SoVITS/pretrained_models/chinese-hubert-base")
140
+
141
+ _=''
142
+ for i in pretrained_model_list:
143
+ if "s2Dv3"not in i and os.path.exists(i)==False:_+=f'\n {i}'
144
+ if _:
145
+ print("warning:",i18n('以下模型不存在:')+_)
146
+
147
+ _ =[[],[]]
148
+ for i in range(3):
149
+ if os.path.exists(pretrained_gpt_name[i]):_[0].append(pretrained_gpt_name[i])
150
+ else:_[0].append("")##没有下pretrained模型的,说不定他们是想自己从零训底模呢
151
+ if os.path.exists(pretrained_sovits_name[i]):_[-1].append(pretrained_sovits_name[i])
152
+ else:_[-1].append("")
153
+ pretrained_gpt_name,pretrained_sovits_name = _
154
+
155
+ SoVITS_weight_root=["SoVITS_weights","SoVITS_weights_v2","SoVITS_weights_v3"]
156
+ GPT_weight_root=["GPT_weights","GPT_weights_v2","GPT_weights_v3"]
157
+ for root in SoVITS_weight_root+GPT_weight_root:
158
+ os.makedirs(root,exist_ok=True)
159
+ def get_weights_names():
160
+ SoVITS_names = [name for name in pretrained_sovits_name if name!=""]
161
+ for path in SoVITS_weight_root:
162
+ for name in os.listdir(path):
163
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
164
+ GPT_names = [name for name in pretrained_gpt_name if name!=""]
165
+ for path in GPT_weight_root:
166
+ for name in os.listdir(path):
167
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
168
+ return SoVITS_names, GPT_names
169
+
170
+ SoVITS_names,GPT_names = get_weights_names()
171
+ for path in SoVITS_weight_root+GPT_weight_root:
172
+ os.makedirs(path,exist_ok=True)
173
+
174
+
175
+ def custom_sort_key(s):
176
+ # 使用正则表达式提取字符串中的数字部分和非数字部分
177
+ parts = re.split('(\d+)', s)
178
+ # 将数字部分转换为整数,非数字部分保持不变
179
+ parts = [int(part) if part.isdigit() else part for part in parts]
180
+ return parts
181
+
182
+ def change_choices():
183
+ SoVITS_names, GPT_names = get_weights_names()
184
+ return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"}
185
+
186
+ p_label=None
187
+ p_uvr5=None
188
+ p_asr=None
189
+ p_denoise=None
190
+ p_tts_inference=None
191
+
192
+ def kill_proc_tree(pid, including_parent=True):
193
+ try:
194
+ parent = psutil.Process(pid)
195
+ except psutil.NoSuchProcess:
196
+ # Process already terminated
197
+ return
198
+
199
+ children = parent.children(recursive=True)
200
+ for child in children:
201
+ try:
202
+ os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL
203
+ except OSError:
204
+ pass
205
+ if including_parent:
206
+ try:
207
+ os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL
208
+ except OSError:
209
+ pass
210
+
211
+ system=platform.system()
212
+ def kill_process(pid):
213
+ if(system=="Windows"):
214
+ cmd = "taskkill /t /f /pid %s" % pid
215
+ os.system(cmd)
216
+ else:
217
+ kill_proc_tree(pid)
218
+
219
+
220
+ def change_label(path_list):
221
+ global p_label
222
+ if(p_label==None):
223
+ check_for_existance([path_list])
224
+ path_list=my_utils.clean_path(path_list)
225
+ cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share)
226
+ yield i18n("打标工具WebUI已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
227
+ print(cmd)
228
+ p_label = Popen(cmd, shell=True)
229
+ elif(p_label!=None):
230
+ kill_process(p_label.pid)
231
+ p_label=None
232
+ yield i18n("打标工具WebUI已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
233
+
234
+ def change_uvr5():
235
+ global p_uvr5
236
+ if(p_uvr5==None):
237
+ cmd = '"%s" tools/uvr5/webui.py "%s" %s %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5,is_share)
238
+ yield i18n("UVR5已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
239
+ print(cmd)
240
+ p_uvr5 = Popen(cmd, shell=True)
241
+ elif(p_uvr5!=None):
242
+ kill_process(p_uvr5.pid)
243
+ p_uvr5=None
244
+ yield i18n("UVR5���关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
245
+
246
+ def change_tts_inference(bert_path,cnhubert_base_path,gpu_number,gpt_path,sovits_path, batched_infer_enabled):
247
+ global p_tts_inference
248
+ if batched_infer_enabled:
249
+ cmd = '"%s" GPT_SoVITS/inference_webui_fast.py "%s"'%(python_exec, language)
250
+ else:
251
+ cmd = '"%s" GPT_SoVITS/inference_webui.py "%s"'%(python_exec, language)
252
+ #####v3暂不支持加速推理
253
+ if version=="v3":
254
+ cmd = '"%s" GPT_SoVITS/inference_webui.py "%s"'%(python_exec, language)
255
+ if(p_tts_inference==None):
256
+ os.environ["gpt_path"]=gpt_path if "/" in gpt_path else "%s/%s"%(GPT_weight_root,gpt_path)
257
+ os.environ["sovits_path"]=sovits_path if "/"in sovits_path else "%s/%s"%(SoVITS_weight_root,sovits_path)
258
+ os.environ["cnhubert_base_path"]=cnhubert_base_path
259
+ os.environ["bert_path"]=bert_path
260
+ os.environ["_CUDA_VISIBLE_DEVICES"]=fix_gpu_number(gpu_number)
261
+ os.environ["is_half"]=str(is_half)
262
+ os.environ["infer_ttswebui"]=str(webui_port_infer_tts)
263
+ os.environ["is_share"]=str(is_share)
264
+ yield i18n("TTS推理进程已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
265
+ print(cmd)
266
+ p_tts_inference = Popen(cmd, shell=True)
267
+ elif(p_tts_inference!=None):
268
+ kill_process(p_tts_inference.pid)
269
+ p_tts_inference=None
270
+ yield i18n("TTS推理进程已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
271
+
272
+ from tools.asr.config import asr_dict
273
+ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang, asr_precision):
274
+ global p_asr
275
+ if(p_asr==None):
276
+ asr_inp_dir=my_utils.clean_path(asr_inp_dir)
277
+ asr_opt_dir=my_utils.clean_path(asr_opt_dir)
278
+ check_for_existance([asr_inp_dir])
279
+ cmd = f'"{python_exec}" tools/asr/{asr_dict[asr_model]["path"]}'
280
+ cmd += f' -i "{asr_inp_dir}"'
281
+ cmd += f' -o "{asr_opt_dir}"'
282
+ cmd += f' -s {asr_model_size}'
283
+ cmd += f' -l {asr_lang}'
284
+ cmd += f" -p {asr_precision}"
285
+ output_file_name = os.path.basename(asr_inp_dir)
286
+ output_folder = asr_opt_dir or "output/asr_opt"
287
+ output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
288
+ yield "ASR任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}, {"__type__":"update"}
289
+ print(cmd)
290
+ p_asr = Popen(cmd, shell=True)
291
+ p_asr.wait()
292
+ p_asr=None
293
+ yield f"ASR任务完成, 查看终端进行下一步", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__":"update","value":output_file_path}, {"__type__":"update","value":output_file_path}, {"__type__":"update","value":asr_inp_dir}
294
+ else:
295
+ yield "已有正在进行的ASR任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}, {"__type__":"update"}
296
+ # return None
297
+
298
+ def close_asr():
299
+ global p_asr
300
+ if(p_asr!=None):
301
+ kill_process(p_asr.pid)
302
+ p_asr=None
303
+ return "已终止ASR进程", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
304
+ def open_denoise(denoise_inp_dir, denoise_opt_dir):
305
+ global p_denoise
306
+ if(p_denoise==None):
307
+ denoise_inp_dir=my_utils.clean_path(denoise_inp_dir)
308
+ denoise_opt_dir=my_utils.clean_path(denoise_opt_dir)
309
+ check_for_existance([denoise_inp_dir])
310
+ cmd = '"%s" tools/cmd-denoise.py -i "%s" -o "%s" -p %s'%(python_exec,denoise_inp_dir,denoise_opt_dir,"float16"if is_half==True else "float32")
311
+
312
+ yield "语音降噪任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
313
+ print(cmd)
314
+ p_denoise = Popen(cmd, shell=True)
315
+ p_denoise.wait()
316
+ p_denoise=None
317
+ yield f"语音降噪任务完成, 查看终端进行下一步", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__":"update","value":denoise_opt_dir}, {"__type__":"update","value":denoise_opt_dir}
318
+ else:
319
+ yield "已有正在进行的语音降噪任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
320
+ # return None
321
+
322
+ def close_denoise():
323
+ global p_denoise
324
+ if(p_denoise!=None):
325
+ kill_process(p_denoise.pid)
326
+ p_denoise=None
327
+ return "已终止语音降噪进程", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
328
+
329
+ p_train_SoVITS=None
330
+ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D,if_grad_ckpt):
331
+ global p_train_SoVITS
332
+ if(p_train_SoVITS==None):
333
+ with open("GPT_SoVITS/configs/s2.json")as f:
334
+ data=f.read()
335
+ data=json.loads(data)
336
+ s2_dir="%s/%s"%(exp_root,exp_name)
337
+ os.makedirs("%s/logs_s2_%s"%(s2_dir,version),exist_ok=True)
338
+ if check_for_existance([s2_dir],is_train=True):
339
+ check_details([s2_dir],is_train=True)
340
+ if(is_half==False):
341
+ data["train"]["fp16_run"]=False
342
+ batch_size=max(1,batch_size//2)
343
+ data["train"]["batch_size"]=batch_size
344
+ data["train"]["epochs"]=total_epoch
345
+ data["train"]["text_low_lr_rate"]=text_low_lr_rate
346
+ data["train"]["pretrained_s2G"]=pretrained_s2G
347
+ data["train"]["pretrained_s2D"]=pretrained_s2D
348
+ data["train"]["if_save_latest"]=if_save_latest
349
+ data["train"]["if_save_every_weights"]=if_save_every_weights
350
+ data["train"]["save_every_epoch"]=save_every_epoch
351
+ data["train"]["gpu_numbers"]=gpu_numbers1Ba
352
+ data["train"]["grad_ckpt"]=if_grad_ckpt
353
+ data["model"]["version"]=version
354
+ data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir
355
+ data["save_weight_dir"]=SoVITS_weight_root[int(version[-1])-1]
356
+ data["name"]=exp_name
357
+ data["version"]=version
358
+ tmp_config_path="%s/tmp_s2.json"%tmp
359
+ with open(tmp_config_path,"w")as f:f.write(json.dumps(data))
360
+ if version in ["v1","v2"]:
361
+ cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path)
362
+ else:
363
+ cmd = '"%s" GPT_SoVITS/s2_train_v3.py --config "%s"'%(python_exec,tmp_config_path)
364
+ yield "SoVITS训练开始:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
365
+ print(cmd)
366
+ p_train_SoVITS = Popen(cmd, shell=True)
367
+ p_train_SoVITS.wait()
368
+ p_train_SoVITS=None
369
+ yield "SoVITS训练完成", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
370
+ else:
371
+ yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
372
+
373
+ def close1Ba():
374
+ global p_train_SoVITS
375
+ if(p_train_SoVITS!=None):
376
+ kill_process(p_train_SoVITS.pid)
377
+ p_train_SoVITS=None
378
+ return "已终止SoVITS训练", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
379
+
380
+ p_train_GPT=None
381
+ def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1):
382
+ global p_train_GPT
383
+ if(p_train_GPT==None):
384
+ with open("GPT_SoVITS/configs/s1longer.yaml"if version=="v1"else "GPT_SoVITS/configs/s1longer-v2.yaml")as f:
385
+ data=f.read()
386
+ data=yaml.load(data, Loader=yaml.FullLoader)
387
+ s1_dir="%s/%s"%(exp_root,exp_name)
388
+ os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
389
+ if check_for_existance([s1_dir],is_train=True):
390
+ check_details([s1_dir],is_train=True)
391
+ if(is_half==False):
392
+ data["train"]["precision"]="32"
393
+ batch_size = max(1, batch_size // 2)
394
+ data["train"]["batch_size"]=batch_size
395
+ data["train"]["epochs"]=total_epoch
396
+ data["pretrained_s1"]=pretrained_s1
397
+ data["train"]["save_every_n_epoch"]=save_every_epoch
398
+ data["train"]["if_save_every_weights"]=if_save_every_weights
399
+ data["train"]["if_save_latest"]=if_save_latest
400
+ data["train"]["if_dpo"]=if_dpo
401
+ data["train"]["half_weights_save_dir"]=GPT_weight_root[int(version[-1])-1]
402
+ data["train"]["exp_name"]=exp_name
403
+ data["train_semantic_path"]="%s/6-name2semantic.tsv"%s1_dir
404
+ data["train_phoneme_path"]="%s/2-name2text.txt"%s1_dir
405
+ data["output_dir"]="%s/logs_s1_%s"%(s1_dir,version)
406
+ # data["version"]=version
407
+
408
+ os.environ["_CUDA_VISIBLE_DEVICES"]=fix_gpu_numbers(gpu_numbers.replace("-",","))
409
+ os.environ["hz"]="25hz"
410
+ tmp_config_path="%s/tmp_s1.yaml"%tmp
411
+ with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False))
412
+ # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir)
413
+ cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path)
414
+ yield "GPT训练开始:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
415
+ print(cmd)
416
+ p_train_GPT = Popen(cmd, shell=True)
417
+ p_train_GPT.wait()
418
+ p_train_GPT=None
419
+ yield "GPT训练完成", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
420
+ else:
421
+ yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
422
+
423
+ def close1Bb():
424
+ global p_train_GPT
425
+ if(p_train_GPT!=None):
426
+ kill_process(p_train_GPT.pid)
427
+ p_train_GPT=None
428
+ return "已终止GPT训练", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
429
+
430
+ ps_slice=[]
431
+ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts):
432
+ global ps_slice
433
+ inp = my_utils.clean_path(inp)
434
+ opt_root = my_utils.clean_path(opt_root)
435
+ check_for_existance([inp])
436
+ if(os.path.exists(inp)==False):
437
+ yield "输入路径不存在", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
438
+ return
439
+ if os.path.isfile(inp):n_parts=1
440
+ elif os.path.isdir(inp):pass
441
+ else:
442
+ yield "输入路径存在但既不是文件也不是文件夹", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
443
+ return
444
+ if (ps_slice == []):
445
+ for i_part in range(n_parts):
446
+ cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts)
447
+ print(cmd)
448
+ p = Popen(cmd, shell=True)
449
+ ps_slice.append(p)
450
+ yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
451
+ for p in ps_slice:
452
+ p.wait()
453
+ ps_slice=[]
454
+ yield "切割结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update", "value":opt_root}, {"__type__": "update", "value":opt_root}, {"__type__": "update", "value":opt_root}
455
+ else:
456
+ yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
457
+
458
+ def close_slice():
459
+ global ps_slice
460
+ if (ps_slice != []):
461
+ for p_slice in ps_slice:
462
+ try:
463
+ kill_process(p_slice.pid)
464
+ except:
465
+ traceback.print_exc()
466
+ ps_slice=[]
467
+ return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
468
+
469
+ ps1a=[]
470
+ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
471
+ global ps1a
472
+ inp_text = my_utils.clean_path(inp_text)
473
+ inp_wav_dir = my_utils.clean_path(inp_wav_dir)
474
+ if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
475
+ check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
476
+ if (ps1a == []):
477
+ opt_dir="%s/%s"%(exp_root,exp_name)
478
+ config={
479
+ "inp_text":inp_text,
480
+ "inp_wav_dir":inp_wav_dir,
481
+ "exp_name":exp_name,
482
+ "opt_dir":opt_dir,
483
+ "bert_pretrained_dir":bert_pretrained_dir,
484
+ }
485
+ gpu_names=gpu_numbers.split("-")
486
+ all_parts=len(gpu_names)
487
+ for i_part in range(all_parts):
488
+ config.update(
489
+ {
490
+ "i_part": str(i_part),
491
+ "all_parts": str(all_parts),
492
+ "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
493
+ "is_half": str(is_half)
494
+ }
495
+ )
496
+ os.environ.update(config)
497
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
498
+ print(cmd)
499
+ p = Popen(cmd, shell=True)
500
+ ps1a.append(p)
501
+ yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
502
+ for p in ps1a:
503
+ p.wait()
504
+ opt = []
505
+ for i_part in range(all_parts):
506
+ txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
507
+ with open(txt_path, "r", encoding="utf8") as f:
508
+ opt += f.read().strip("\n").split("\n")
509
+ os.remove(txt_path)
510
+ path_text = "%s/2-name2text.txt" % opt_dir
511
+ with open(path_text, "w", encoding="utf8") as f:
512
+ f.write("\n".join(opt) + "\n")
513
+ ps1a=[]
514
+ if len("".join(opt)) > 0:
515
+ yield "文本进程成功", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
516
+ else:
517
+ yield "文本进程失败", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
518
+ else:
519
+ yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
520
+
521
+ def close1a():
522
+ global ps1a
523
+ if (ps1a != []):
524
+ for p1a in ps1a:
525
+ try:
526
+ kill_process(p1a.pid)
527
+ except:
528
+ traceback.print_exc()
529
+ ps1a=[]
530
+ return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
531
+
532
+ ps1b=[]
533
+ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
534
+ global ps1b
535
+ inp_text = my_utils.clean_path(inp_text)
536
+ inp_wav_dir = my_utils.clean_path(inp_wav_dir)
537
+ if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
538
+ check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
539
+ if (ps1b == []):
540
+ config={
541
+ "inp_text":inp_text,
542
+ "inp_wav_dir":inp_wav_dir,
543
+ "exp_name":exp_name,
544
+ "opt_dir":"%s/%s"%(exp_root,exp_name),
545
+ "cnhubert_base_dir":ssl_pretrained_dir,
546
+ "is_half": str(is_half)
547
+ }
548
+ gpu_names=gpu_numbers.split("-")
549
+ all_parts=len(gpu_names)
550
+ for i_part in range(all_parts):
551
+ config.update(
552
+ {
553
+ "i_part": str(i_part),
554
+ "all_parts": str(all_parts),
555
+ "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
556
+ }
557
+ )
558
+ os.environ.update(config)
559
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
560
+ print(cmd)
561
+ p = Popen(cmd, shell=True)
562
+ ps1b.append(p)
563
+ yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
564
+ for p in ps1b:
565
+ p.wait()
566
+ ps1b=[]
567
+ yield "SSL提取进程结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
568
+ else:
569
+ yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
570
+
571
+ def close1b():
572
+ global ps1b
573
+ if (ps1b != []):
574
+ for p1b in ps1b:
575
+ try:
576
+ kill_process(p1b.pid)
577
+ except:
578
+ traceback.print_exc()
579
+ ps1b=[]
580
+ return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
581
+
582
+ ps1c=[]
583
+ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
584
+ global ps1c
585
+ inp_text = my_utils.clean_path(inp_text)
586
+ if check_for_existance([inp_text,''], is_dataset_processing=True):
587
+ check_details([inp_text,''], is_dataset_processing=True)
588
+ if (ps1c == []):
589
+ opt_dir="%s/%s"%(exp_root,exp_name)
590
+ config={
591
+ "inp_text":inp_text,
592
+ "exp_name":exp_name,
593
+ "opt_dir":opt_dir,
594
+ "pretrained_s2G":pretrained_s2G_path,
595
+ "s2config_path":"GPT_SoVITS/configs/s2.json",
596
+ "is_half": str(is_half)
597
+ }
598
+ gpu_names=gpu_numbers.split("-")
599
+ all_parts=len(gpu_names)
600
+ for i_part in range(all_parts):
601
+ config.update(
602
+ {
603
+ "i_part": str(i_part),
604
+ "all_parts": str(all_parts),
605
+ "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
606
+ }
607
+ )
608
+ os.environ.update(config)
609
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
610
+ print(cmd)
611
+ p = Popen(cmd, shell=True)
612
+ ps1c.append(p)
613
+ yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
614
+ for p in ps1c:
615
+ p.wait()
616
+ opt = ["item_name\tsemantic_audio"]
617
+ path_semantic = "%s/6-name2semantic.tsv" % opt_dir
618
+ for i_part in range(all_parts):
619
+ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
620
+ with open(semantic_path, "r", encoding="utf8") as f:
621
+ opt += f.read().strip("\n").split("\n")
622
+ os.remove(semantic_path)
623
+ with open(path_semantic, "w", encoding="utf8") as f:
624
+ f.write("\n".join(opt) + "\n")
625
+ ps1c=[]
626
+ yield "语义token提取进程结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
627
+ else:
628
+ yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
629
+
630
+ def close1c():
631
+ global ps1c
632
+ if (ps1c != []):
633
+ for p1c in ps1c:
634
+ try:
635
+ kill_process(p1c.pid)
636
+ except:
637
+ traceback.print_exc()
638
+ ps1c=[]
639
+ return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
640
+ #####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G
641
+ ps1abc=[]
642
+ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path):
643
+ global ps1abc
644
+ inp_text = my_utils.clean_path(inp_text)
645
+ inp_wav_dir = my_utils.clean_path(inp_wav_dir)
646
+ if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
647
+ check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
648
+ if (ps1abc == []):
649
+ opt_dir="%s/%s"%(exp_root,exp_name)
650
+ try:
651
+ #############################1a
652
+ path_text="%s/2-name2text.txt" % opt_dir
653
+ if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and len(open(path_text,"r",encoding="utf8").read().strip("\n").split("\n"))<2)):
654
+ config={
655
+ "inp_text":inp_text,
656
+ "inp_wav_dir":inp_wav_dir,
657
+ "exp_name":exp_name,
658
+ "opt_dir":opt_dir,
659
+ "bert_pretrained_dir":bert_pretrained_dir,
660
+ "is_half": str(is_half)
661
+ }
662
+ gpu_names=gpu_numbers1a.split("-")
663
+ all_parts=len(gpu_names)
664
+ for i_part in range(all_parts):
665
+ config.update(
666
+ {
667
+ "i_part": str(i_part),
668
+ "all_parts": str(all_parts),
669
+ "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
670
+ }
671
+ )
672
+ os.environ.update(config)
673
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
674
+ print(cmd)
675
+ p = Popen(cmd, shell=True)
676
+ ps1abc.append(p)
677
+ yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
678
+ for p in ps1abc:p.wait()
679
+
680
+ opt = []
681
+ for i_part in range(all_parts):#txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part)
682
+ txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
683
+ with open(txt_path, "r",encoding="utf8") as f:
684
+ opt += f.read().strip("\n").split("\n")
685
+ os.remove(txt_path)
686
+ with open(path_text, "w",encoding="utf8") as f:
687
+ f.write("\n".join(opt) + "\n")
688
+ assert len("".join(opt)) > 0, "1Aa-文本获取进程失败"
689
+ yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
690
+ ps1abc=[]
691
+ #############################1b
692
+ config={
693
+ "inp_text":inp_text,
694
+ "inp_wav_dir":inp_wav_dir,
695
+ "exp_name":exp_name,
696
+ "opt_dir":opt_dir,
697
+ "cnhubert_base_dir":ssl_pretrained_dir,
698
+ }
699
+ gpu_names=gpu_numbers1Ba.split("-")
700
+ all_parts=len(gpu_names)
701
+ for i_part in range(all_parts):
702
+ config.update(
703
+ {
704
+ "i_part": str(i_part),
705
+ "all_parts": str(all_parts),
706
+ "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
707
+ }
708
+ )
709
+ os.environ.update(config)
710
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
711
+ print(cmd)
712
+ p = Popen(cmd, shell=True)
713
+ ps1abc.append(p)
714
+ yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
715
+ for p in ps1abc:p.wait()
716
+ yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
717
+ ps1abc=[]
718
+ #############################1c
719
+ path_semantic = "%s/6-name2semantic.tsv" % opt_dir
720
+ if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<31)):
721
+ config={
722
+ "inp_text":inp_text,
723
+ "exp_name":exp_name,
724
+ "opt_dir":opt_dir,
725
+ "pretrained_s2G":pretrained_s2G_path,
726
+ "s2config_path":"GPT_SoVITS/configs/s2.json",
727
+ }
728
+ gpu_names=gpu_numbers1c.split("-")
729
+ all_parts=len(gpu_names)
730
+ for i_part in range(all_parts):
731
+ config.update(
732
+ {
733
+ "i_part": str(i_part),
734
+ "all_parts": str(all_parts),
735
+ "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
736
+ }
737
+ )
738
+ os.environ.update(config)
739
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
740
+ print(cmd)
741
+ p = Popen(cmd, shell=True)
742
+ ps1abc.append(p)
743
+ yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
744
+ for p in ps1abc:p.wait()
745
+
746
+ opt = ["item_name\tsemantic_audio"]
747
+ for i_part in range(all_parts):
748
+ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
749
+ with open(semantic_path, "r",encoding="utf8") as f:
750
+ opt += f.read().strip("\n").split("\n")
751
+ os.remove(semantic_path)
752
+ with open(path_semantic, "w",encoding="utf8") as f:
753
+ f.write("\n".join(opt) + "\n")
754
+ yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
755
+ ps1abc = []
756
+ yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
757
+ except:
758
+ traceback.print_exc()
759
+ close1abc()
760
+ yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
761
+ else:
762
+ yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
763
+
764
+ def close1abc():
765
+ global ps1abc
766
+ if (ps1abc != []):
767
+ for p1abc in ps1abc:
768
+ try:
769
+ kill_process(p1abc.pid)
770
+ except:
771
+ traceback.print_exc()
772
+ ps1abc=[]
773
+ return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
774
+
775
+ def switch_version(version_):
776
+ os.environ["version"]=version_
777
+ global version
778
+ version = version_
779
+ if pretrained_sovits_name[int(version[-1])-1] !='' and pretrained_gpt_name[int(version[-1])-1] !='':...
780
+ else:
781
+ gr.Warning(i18n(f'未下载{version.upper()}模型'))
782
+ set_default()
783
+ return {'__type__':'update', 'value':pretrained_sovits_name[int(version[-1])-1]}, {'__type__':'update', 'value':pretrained_sovits_name[int(version[-1])-1].replace("s2G","s2D")}, {'__type__':'update', 'value':pretrained_gpt_name[int(version[-1])-1]}, {'__type__':'update', 'value':pretrained_gpt_name[int(version[-1])-1]}, {'__type__':'update', 'value':pretrained_sovits_name[int(version[-1])-1]},{'__type__':'update',"value":default_batch_size,"maximum":default_max_batch_size},{'__type__':'update',"value":default_sovits_epoch,"maximum":max_sovits_epoch},{'__type__':'update',"value":default_sovits_save_every_epoch,"maximum":max_sovits_save_every_epoch},{'__type__':'update',"interactive":True if version!="v3"else False},{'__type__':'update',"interactive":True if version == "v3" else False},{'__type__':'update',"interactive":False if version == "v3" else True,"value":False}
784
+
785
+ if os.path.exists('GPT_SoVITS/text/G2PWModel'):...
786
+ else:
787
+ cmd = '"%s" GPT_SoVITS/download.py'%python_exec
788
+ p = Popen(cmd, shell=True)
789
+ p.wait()
790
+
791
+ def sync(text):
792
+ return {'__type__':'update','value':text}
793
+ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
794
+ gr.Markdown(
795
+ value=
796
+ i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
797
+ )
798
+ gr.Markdown(
799
+ value=
800
+ i18n("中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e")
801
+ )
802
+
803
+ with gr.Tabs():
804
+ with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
805
+ gr.Markdown(value=i18n("0a-UVR5人声伴奏分离&去混响去延迟工具"))
806
+ with gr.Row():
807
+ with gr.Column(scale=3):
808
+ with gr.Row():
809
+ uvr5_info = gr.Textbox(label=i18n("UVR5进程输出信息"))
810
+ open_uvr5 = gr.Button(value=i18n("开启UVR5-WebUI"),variant="primary",visible=True)
811
+ close_uvr5 = gr.Button(value=i18n("关闭UVR5-WebUI"),variant="primary",visible=False)
812
+ gr.Markdown(value=i18n("0b-语音切分工具"))
813
+ with gr.Row():
814
+ with gr.Column(scale=3):
815
+ with gr.Row():
816
+ slice_inp_path=gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"),value="")
817
+ slice_opt_root=gr.Textbox(label=i18n("切分后的子音频的输出根目录"),value="output/slicer_opt")
818
+ with gr.Row():
819
+ threshold=gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"),value="-34")
820
+ min_length=gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"),value="4000")
821
+ min_interval=gr.Textbox(label=i18n("min_interval:最短切割间隔"),value="300")
822
+ hop_size=gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"),value="10")
823
+ max_sil_kept=gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"),value="500")
824
+ with gr.Row():
825
+ _max=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("max:归一化后最大值多少"),value=0.9,interactive=True)
826
+ alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True)
827
+ with gr.Row():
828
+ n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True)
829
+ slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
830
+ open_slicer_button=gr.Button(i18n("开启语音切割"), variant="primary",visible=True)
831
+ close_slicer_button=gr.Button(i18n("终止语音切割"), variant="primary",visible=False)
832
+ gr.Markdown(value=i18n("0bb-语音降噪工具"))
833
+ with gr.Row():
834
+ with gr.Column(scale=3):
835
+ with gr.Row():
836
+ denoise_input_dir=gr.Textbox(label=i18n("降噪音频文件输入文件夹"),value="")
837
+ denoise_output_dir=gr.Textbox(label=i18n("降噪结果输出文件夹"),value="output/denoise_opt")
838
+ with gr.Row():
839
+ denoise_info = gr.Textbox(label=i18n("语音降噪进程输出信息"))
840
+ open_denoise_button = gr.Button(i18n("开启语音降噪"), variant="primary",visible=True)
841
+ close_denoise_button = gr.Button(i18n("终止语音降噪进程"), variant="primary",visible=False)
842
+ gr.Markdown(value=i18n("0c-中文批量离线ASR工具"))
843
+ with gr.Row():
844
+ with gr.Column(scale=3):
845
+ with gr.Row():
846
+ asr_inp_dir = gr.Textbox(
847
+ label=i18n("输入文件夹路径"),
848
+ value="D:\\GPT-SoVITS\\raw\\xxx",
849
+ interactive=True,
850
+ )
851
+ asr_opt_dir = gr.Textbox(
852
+ label = i18n("输出文件夹路径"),
853
+ value = "output/asr_opt",
854
+ interactive = True,
855
+ )
856
+ with gr.Row():
857
+ asr_model = gr.Dropdown(
858
+ label = i18n("ASR 模型"),
859
+ choices = list(asr_dict.keys()),
860
+ interactive = True,
861
+ value="达摩 ASR (中文)"
862
+ )
863
+ asr_size = gr.Dropdown(
864
+ label = i18n("ASR 模型尺寸"),
865
+ choices = ["large"],
866
+ interactive = True,
867
+ value="large"
868
+ )
869
+ asr_lang = gr.Dropdown(
870
+ label = i18n("ASR 语言设置"),
871
+ choices = ["zh","yue"],
872
+ interactive = True,
873
+ value="zh"
874
+ )
875
+ asr_precision = gr.Dropdown(
876
+ label = i18n("数据类型精度"),
877
+ choices = ["float32"],
878
+ interactive = True,
879
+ value="float32"
880
+ )
881
+ with gr.Row():
882
+ asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
883
+ open_asr_button = gr.Button(i18n("开启离线批量ASR"), variant="primary",visible=True)
884
+ close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False)
885
+
886
+ def change_lang_choices(key): #根据选择的模型修改可选的语言
887
+ # return gr.Dropdown(choices=asr_dict[key]['lang'])
888
+ return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
889
+ def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸
890
+ # return gr.Dropdown(choices=asr_dict[key]['size'])
891
+ return {"__type__": "update", "choices": asr_dict[key]['size'],"value":asr_dict[key]['size'][-1]}
892
+ def change_precision_choices(key): #根据选择的模型修改可选的语言
893
+ if key =="Faster Whisper (多语种)":
894
+ if default_batch_size <= 4:
895
+ precision = 'int8'
896
+ elif is_half:
897
+ precision = 'float16'
898
+ else:
899
+ precision = 'float32'
900
+ else:
901
+ precision = 'float32'
902
+ # return gr.Dropdown(choices=asr_dict[key]['precision'])
903
+ return {"__type__": "update", "choices": asr_dict[key]['precision'],"value":precision}
904
+ asr_model.change(change_lang_choices, [asr_model], [asr_lang])
905
+ asr_model.change(change_size_choices, [asr_model], [asr_size])
906
+ asr_model.change(change_precision_choices, [asr_model], [asr_precision])
907
+
908
+
909
+ gr.Markdown(value=i18n("0d-语音文本校对标注工具"))
910
+ with gr.Row():
911
+ with gr.Column(scale=3):
912
+ with gr.Row():
913
+ path_list = gr.Textbox(
914
+ label=i18n(".list标注文件的路径"),
915
+ value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list",
916
+ interactive=True,
917
+ )
918
+ label_info = gr.Textbox(label=i18n("打标工具进程输出信息"))
919
+
920
+ open_label = gr.Button(value=i18n("开启打标WebUI"),variant="primary",visible=True)
921
+ close_label = gr.Button(value=i18n("关闭打标WebUI"),variant="primary",visible=False)
922
+ open_label.click(change_label, [path_list], [label_info,open_label,close_label])
923
+ close_label.click(change_label, [path_list], [label_info,open_label,close_label])
924
+ open_uvr5.click(change_uvr5, [], [uvr5_info,open_uvr5,close_uvr5])
925
+ close_uvr5.click(change_uvr5, [], [uvr5_info,open_uvr5,close_uvr5])
926
+
927
+ with gr.TabItem(i18n("1-GPT-SoVITS-TTS")):
928
+ with gr.Row():
929
+ with gr.Row():
930
+ exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True)
931
+ gpu_info = gr.Textbox(label=i18n("显卡信息"), value=gpu_info, visible=True, interactive=False)
932
+ version_checkbox = gr.Radio(label=i18n("版本"),value=version,choices=['v1','v2','v3'])
933
+ with gr.Row():
934
+ pretrained_s2G = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value=pretrained_sovits_name[int(version[-1])-1], interactive=True, lines=2, max_lines=3,scale=9)
935
+ pretrained_s2D = gr.Textbox(label=i18n("预训练的SoVITS-D模型路径"), value=pretrained_sovits_name[int(version[-1])-1].replace("s2G","s2D"), interactive=True, lines=2, max_lines=3,scale=9)
936
+ pretrained_s1 = gr.Textbox(label=i18n("预训练的GPT模型路径"), value=pretrained_gpt_name[int(version[-1])-1], interactive=True, lines=2, max_lines=3,scale=10)
937
+ with gr.TabItem(i18n("1A-训练集格式化工具")):
938
+ gr.Markdown(value=i18n("输出logs/实验名目录下应有23456开头的文件和文件夹"))
939
+ with gr.Row():
940
+ with gr.Row():
941
+ inp_text = gr.Textbox(label=i18n("*文本标注文件"),value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True,scale=10)
942
+ with gr.Row():
943
+ inp_wav_dir = gr.Textbox(
944
+ label=i18n("*训练集音频文件目录"),
945
+ # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",
946
+ interactive=True,
947
+ placeholder=i18n("填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。"), scale=10
948
+ )
949
+ gr.Markdown(value=i18n("1Aa-文本内容"))
950
+ with gr.Row():
951
+ with gr.Row():
952
+ gpu_numbers1a = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
953
+ with gr.Row():
954
+ bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False,lines=2)
955
+ with gr.Row():
956
+ button1a_open = gr.Button(i18n("开启文本获取"), variant="primary",visible=True)
957
+ button1a_close = gr.Button(i18n("终止文本获取进程"), variant="primary",visible=False)
958
+ with gr.Row():
959
+ info1a=gr.Textbox(label=i18n("文本进程输出信息"))
960
+ gr.Markdown(value=i18n("1Ab-SSL自监督特征提取"))
961
+ with gr.Row():
962
+ with gr.Row():
963
+ gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
964
+ with gr.Row():
965
+ cnhubert_base_dir = gr.Textbox(label=i18n("预训练的SSL模型路径"),value="GPT_SoVITS/pretrained_models/chinese-hubert-base",interactive=False,lines=2)
966
+ with gr.Row():
967
+ button1b_open = gr.Button(i18n("开启SSL提取"), variant="primary",visible=True)
968
+ button1b_close = gr.Button(i18n("终止SSL提取进程"), variant="primary",visible=False)
969
+ with gr.Row():
970
+ info1b=gr.Textbox(label=i18n("SSL进程输出信息"))
971
+ gr.Markdown(value=i18n("1Ac-语义token提取"))
972
+ with gr.Row():
973
+ with gr.Row():
974
+ gpu_numbers1c = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
975
+ with gr.Row():
976
+ pretrained_s2G_ = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value=pretrained_sovits_name[int(version[-1])-1], interactive=False,lines=2)
977
+ with gr.Row():
978
+ button1c_open = gr.Button(i18n("开启语义token提取"), variant="primary",visible=True)
979
+ button1c_close = gr.Button(i18n("终止语义token提取进程"), variant="primary",visible=False)
980
+ with gr.Row():
981
+ info1c=gr.Textbox(label=i18n("语义token提取进程输出信息"))
982
+ gr.Markdown(value=i18n("1Aabc-训练集格式化一键三连"))
983
+ with gr.Row():
984
+ with gr.Row():
985
+ button1abc_open = gr.Button(i18n("开启一键三连"), variant="primary",visible=True)
986
+ button1abc_close = gr.Button(i18n("终止一键三连"), variant="primary",visible=False)
987
+ with gr.Row():
988
+ info1abc=gr.Textbox(label=i18n("一键三连进程输出信息"))
989
+
990
+ pretrained_s2G.change(sync,[pretrained_s2G],[pretrained_s2G_])
991
+ open_asr_button.click(open_asr, [asr_inp_dir, asr_opt_dir, asr_model, asr_size, asr_lang, asr_precision], [asr_info,open_asr_button,close_asr_button,path_list,inp_text,inp_wav_dir])
992
+ close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button])
993
+ open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button,asr_inp_dir,denoise_input_dir,inp_wav_dir])
994
+ close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button])
995
+ open_denoise_button.click(open_denoise, [denoise_input_dir,denoise_output_dir], [denoise_info,open_denoise_button,close_denoise_button,asr_inp_dir,inp_wav_dir])
996
+ close_denoise_button.click(close_denoise, [], [denoise_info,open_denoise_button,close_denoise_button])
997
+
998
+ button1a_open.click(open1a, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,bert_pretrained_dir], [info1a,button1a_open,button1a_close])
999
+ button1a_close.click(close1a, [], [info1a,button1a_open,button1a_close])
1000
+ button1b_open.click(open1b, [inp_text,inp_wav_dir,exp_name,gpu_numbers1Ba,cnhubert_base_dir], [info1b,button1b_open,button1b_close])
1001
+ button1b_close.click(close1b, [], [info1b,button1b_open,button1b_close])
1002
+ button1c_open.click(open1c, [inp_text,exp_name,gpu_numbers1c,pretrained_s2G], [info1c,button1c_open,button1c_close])
1003
+ button1c_close.click(close1c, [], [info1c,button1c_open,button1c_close])
1004
+ button1abc_open.click(open1abc, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G], [info1abc,button1abc_open,button1abc_close])
1005
+ button1abc_close.click(close1abc, [], [info1abc,button1abc_open,button1abc_close])
1006
+ with gr.TabItem(i18n("1B-微调训练")):
1007
+ gr.Markdown(value=i18n("1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。"))
1008
+ with gr.Row():
1009
+ with gr.Column():
1010
+ with gr.Row():
1011
+ batch_size = gr.Slider(minimum=1,maximum=default_max_batch_size,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
1012
+ total_epoch = gr.Slider(minimum=1,maximum=max_sovits_epoch,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=default_sovits_epoch,interactive=True)
1013
+ with gr.Row():
1014
+ text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True if version!="v3"else False)#v3 not need
1015
+ save_every_epoch = gr.Slider(minimum=1,maximum=max_sovits_save_every_epoch,step=1,label=i18n("保存频率save_every_epoch"),value=default_sovits_save_every_epoch,interactive=True)
1016
+ with gr.Column():
1017
+ with gr.Column():
1018
+ if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
1019
+ if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
1020
+ if_grad_ckpt = gr.Checkbox(label="v3是否开启梯度检查点节省显存占用", value=False, interactive=True if version == "v3" else False, show_label=True) # 只有V3s2可以用
1021
+ with gr.Row():
1022
+ gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True)
1023
+ with gr.Row():
1024
+ with gr.Row():
1025
+ button1Ba_open = gr.Button(i18n("开启SoVITS训练"), variant="primary",visible=True)
1026
+ button1Ba_close = gr.Button(i18n("终止SoVITS训练"), variant="primary",visible=False)
1027
+ with gr.Row():
1028
+ info1Ba=gr.Textbox(label=i18n("SoVITS训练进程输出信息"))
1029
+ gr.Markdown(value=i18n("1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。"))
1030
+ with gr.Row():
1031
+ with gr.Column():
1032
+ with gr.Row():
1033
+ batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size_s1,interactive=True)
1034
+ total_epoch1Bb = gr.Slider(minimum=2,maximum=50,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True)
1035
+ with gr.Row():
1036
+ save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True)
1037
+ if_dpo = gr.Checkbox(label=i18n("是否开启dpo训练选项(实验性)"), value=False, interactive=True, show_label=True)
1038
+ with gr.Column():
1039
+ with gr.Column():
1040
+ if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
1041
+ if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
1042
+ with gr.Row():
1043
+ gpu_numbers1Bb = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True)
1044
+ with gr.Row():
1045
+ with gr.Row():
1046
+ button1Bb_open = gr.Button(i18n("开启GPT训练"), variant="primary",visible=True)
1047
+ button1Bb_close = gr.Button(i18n("终止GPT训练"), variant="primary",visible=False)
1048
+ with gr.Row():
1049
+ info1Bb=gr.Textbox(label=i18n("GPT训练进程输出信息"))
1050
+ button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D,if_grad_ckpt], [info1Ba,button1Ba_open,button1Ba_close])
1051
+ button1Ba_close.click(close1Ba, [], [info1Ba,button1Ba_open,button1Ba_close])
1052
+ button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_dpo,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close])
1053
+ button1Bb_close.click(close1Bb, [], [info1Bb,button1Bb_open,button1Bb_close])
1054
+ with gr.TabItem(i18n("1C-推理")):
1055
+ gr.Markdown(value=i18n("选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。"))
1056
+ with gr.Row():
1057
+ with gr.Row():
1058
+ GPT_dropdown = gr.Dropdown(label=i18n("*GPT模型列表"), choices=sorted(GPT_names,key=custom_sort_key),value=pretrained_gpt_name[0],interactive=True)
1059
+ SoVITS_dropdown = gr.Dropdown(label=i18n("*SoVITS模型列表"), choices=sorted(SoVITS_names,key=custom_sort_key),value=pretrained_sovits_name[0],interactive=True)
1060
+ with gr.Row():
1061
+ gpu_number_1C=gr.Textbox(label=i18n("GPU卡号,只能填1个整数"), value=gpus, interactive=True)
1062
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
1063
+ refresh_button.click(fn=change_choices,inputs=[],outputs=[SoVITS_dropdown,GPT_dropdown])
1064
+ with gr.Row():
1065
+ with gr.Row():
1066
+ batched_infer_enabled = gr.Checkbox(label=i18n("启用并行推理版本"), value=False, interactive=True, show_label=True)
1067
+ with gr.Row():
1068
+ open_tts = gr.Button(value=i18n("开启TTS推理WebUI"),variant='primary',visible=True)
1069
+ close_tts = gr.Button(value=i18n("关闭TTS推理WebUI"),variant='primary',visible=False)
1070
+ with gr.Row():
1071
+ tts_info = gr.Textbox(label=i18n("TTS推理WebUI进程输出信息"))
1072
+ open_tts.click(change_tts_inference, [bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown, batched_infer_enabled], [tts_info,open_tts,close_tts])
1073
+ close_tts.click(change_tts_inference, [bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown, batched_infer_enabled], [tts_info,open_tts,close_tts])
1074
+ version_checkbox.change(switch_version,[version_checkbox],[pretrained_s2G,pretrained_s2D,pretrained_s1,GPT_dropdown,SoVITS_dropdown,batch_size,total_epoch,save_every_epoch,text_low_lr_rate, if_grad_ckpt, batched_infer_enabled])
1075
+ with gr.TabItem(i18n("2-GPT-SoVITS-变声")):gr.Markdown(value=i18n("施工中,请静候佳音"))
1076
+ app.queue().launch(#concurrency_count=511, max_size=1022
1077
+ inbrowser=True,
1078
+ share=True,
1079
+ quiet=True,
1080
+ )
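The launch call above starts the training panel with share=True, which opens a public Gradio tunnel every time. As a minimal sketch (not part of this commit; server_name/server_port are standard gradio launch keyword arguments and 9874 is only an illustrative port), the same call can instead bind to a fixed local port:

app.queue().launch(
    server_name="0.0.0.0",   # listen on all interfaces
    server_port=9874,        # example port, pick any free one
    inbrowser=True,
    share=False,             # keep the UI local, no public tunnel
    quiet=True,
)
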
1739554402.592865.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31b05188bbdaebb1665189a4e5bfd70ba5875fe210742f811cb3efceb47ee875
3
+ size 677773440
Docker/damo.sha256 ADDED
@@ -0,0 +1,3 @@
1
+ 5bba782a5e9196166233b9ab12ba04cadff9ef9212b4ff6153ed9290ff679025 /workspace/tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pb
2
+ b3be75be477f0780277f3bae0fe489f48718f585f3a6e45d7dd1fbb1a4255fc5 /workspace/tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch/model.pb
3
+ a5818bb9d933805a916eebe41eb41648f7f9caad30b4bd59d56f3ca135421916 /workspace/tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/model.pb
Docker/download.py ADDED
@@ -0,0 +1,5 @@
1
+ # Download the DAMO ASR-related models from ModelScope
2
+ from modelscope import snapshot_download
3
+ model_dir = snapshot_download('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',revision="v2.0.4")
4
+ model_dir = snapshot_download('damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',revision="v2.0.4")
5
+ model_dir = snapshot_download('damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',revision="v2.0.4")
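snapshot_download returns the local directory each pinned ModelScope snapshot resolves to (which is what the model_dir assignments above capture). A small sketch, using only the call already imported in download.py, that prints those paths for the three DAMO ASR/VAD/punctuation models:

# Sketch: print where each pinned ModelScope snapshot lands on disk.
from modelscope import snapshot_download

for model_id in (
    'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    'damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
    'damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
):
    model_dir = snapshot_download(model_id, revision="v2.0.4")
    print(model_id, '->', model_dir)
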
Docker/download.sh ADDED
@@ -0,0 +1,11 @@
1
+ #!/usr/bin/env bash
2
+
3
+ set -Eeuo pipefail
4
+
5
+ echo "Downloading models..."
6
+
7
+ aria2c --disable-ipv6 --input-file /workspace/Docker/links.txt --dir /workspace --continue
8
+
9
+ echo "Checking SHA256..."
10
+
11
+ parallel --will-cite -a /workspace/Docker/links.sha256 "echo -n {} | sha256sum -c"
Docker/links.sha256 ADDED
@@ -0,0 +1,12 @@
1
+ b1c1e17e9c99547a89388f72048cd6e1b41b5a18b170e86a46dfde0324d63eb1 /workspace/GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
2
+ fc579c1db3c1e21b721001cf99d7a584214280df19b002e200b630a34fa06eb8 /workspace/GPT_SoVITS/pretrained_models/s2D488k.pth
3
+ 020a014e1e01e550e510f2f61fae5e5f5b6aab40f15c22f1f12f724df507e835 /workspace/GPT_SoVITS/pretrained_models/s2G488k.pth
4
+ 24164f129c66499d1346e2aa55f183250c223161ec2770c0da3d3b08cf432d3c /workspace/GPT_SoVITS/pretrained_models/chinese-hubert-base/pytorch_model.bin
5
+ e53a693acc59ace251d143d068096ae0d7b79e4b1b503fa84c9dcf576448c1d8 /workspace/GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin
6
+ 39796caa5db18d7f9382d8ac997ac967bfd85f7761014bb807d2543cc844ef05 /workspace/tools/uvr5/uvr5_weights/HP2_all_vocals.pth
7
+ 45e6b65199e781b4a6542002699be9f19cd3d1cb7d1558bc2bfbcd84674dfe28 /workspace/tools/uvr5/uvr5_weights/HP3_all_vocals.pth
8
+ 5908891829634926119720241e8573d97cbeb8277110a7512bdb0bd7563258ee /workspace/tools/uvr5/uvr5_weights/HP5_only_main_vocal.pth
9
+ 8c8fd1582f9aabc363e47af62ddb88df6cae7e064cae75bbf041a067a5e0aee2 /workspace/tools/uvr5/uvr5_weights/VR-DeEchoAggressive.pth
10
+ 01376dd2a571bf3cb9cced680732726d2d732609d09216a610b0d110f133febe /workspace/tools/uvr5/uvr5_weights/VR-DeEchoDeReverb.pth
11
+ 56aba59db3bcdd14a14464e62f3129698ecdea62eee0f003b9360923eb3ac79e /workspace/tools/uvr5/uvr5_weights/VR-DeEchoNormal.pth
12
+ 233bb5c6aaa365e568659a0a81211746fa881f8f47f82d9e864fce1f7692db80 /workspace/tools/uvr5/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
Docker/links.txt ADDED
@@ -0,0 +1,34 @@
1
+ # GPT-SoVITS models
2
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s1bert25hz-2kh-longer-epoch%3D68e-step%3D50232.ckpt
3
+ out=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
4
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s2D488k.pth
5
+ out=GPT_SoVITS/pretrained_models/s2D488k.pth
6
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s2G488k.pth
7
+ out=GPT_SoVITS/pretrained_models/s2G488k.pth
8
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/config.json
9
+ out=GPT_SoVITS/pretrained_models/chinese-hubert-base/config.json
10
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/preprocessor_config.json
11
+ out=GPT_SoVITS/pretrained_models/chinese-hubert-base/preprocessor_config.json
12
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/pytorch_model.bin
13
+ out=GPT_SoVITS/pretrained_models/chinese-hubert-base/pytorch_model.bin
14
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/config.json
15
+ out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/config.json
16
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/pytorch_model.bin
17
+ out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin
18
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/tokenizer.json
19
+ out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/tokenizer.json
20
+ # UVR5
21
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth
22
+ out=tools/uvr5/uvr5_weights/HP2_all_vocals.pth
23
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth
24
+ out=tools/uvr5/uvr5_weights/HP3_all_vocals.pth
25
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth
26
+ out=tools/uvr5/uvr5_weights/HP5_only_main_vocal.pth
27
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth
28
+ out=tools/uvr5/uvr5_weights/VR-DeEchoAggressive.pth
29
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth
30
+ out=tools/uvr5/uvr5_weights/VR-DeEchoDeReverb.pth
31
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth
32
+ out=tools/uvr5/uvr5_weights/VR-DeEchoNormal.pth
33
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
34
+ out=tools/uvr5/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
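links.txt is the aria2c input file consumed by Docker/download.sh: each download is a URI line followed by an out= option giving the target path relative to --dir /workspace, and every downloaded file also needs a matching checksum line in Docker/links.sha256 so the sha256sum -c pass keeps succeeding. A hypothetical new entry, for illustration only (URL, path and hash are placeholders):

# Added to Docker/links.txt (in aria2's input-file format, option lines
# are indented under the URI they apply to)
https://huggingface.co/example-org/example-model/resolve/main/example.pth
  out=tools/example/example.pth

# Added to Docker/links.sha256, matching the format of the existing lines
<sha256-of-example.pth>  /workspace/tools/example/example.pth
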
Dockerfile ADDED
@@ -0,0 +1,42 @@
1
+ # Base CUDA image
2
+ FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04
3
+
4
+ LABEL maintainer="breakstring@hotmail.com"
5
+ LABEL version="dev-20240209"
6
+ LABEL description="Docker image for GPT-SoVITS"
7
+
8
+
9
+ # Install 3rd party apps
10
+ ENV DEBIAN_FRONTEND=noninteractive
11
+ ENV TZ=Etc/UTC
12
+ RUN apt-get update && \
13
+ apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && \
14
+ git lfs install && \
15
+ rm -rf /var/lib/apt/lists/*
16
+
17
+ # Copy only requirements.txt initially to leverage Docker cache
18
+ WORKDIR /workspace
19
+ COPY requirements.txt /workspace/
20
+ RUN pip install --no-cache-dir -r requirements.txt
21
+
22
+ # Define a build-time argument for image type
23
+ ARG IMAGE_TYPE=full
24
+
25
+ # Conditional logic based on the IMAGE_TYPE argument
26
+ # Always copy the Docker directory, but only use it if IMAGE_TYPE is not "elite"
27
+ COPY ./Docker /workspace/Docker
28
+ # The "elite" image type does not bundle the extra models
29
+ RUN if [ "$IMAGE_TYPE" != "elite" ]; then \
30
+ chmod +x /workspace/Docker/download.sh && \
31
+ /workspace/Docker/download.sh && \
32
+ python /workspace/Docker/download.py && \
33
+ python -m nltk.downloader averaged_perceptron_tagger cmudict; \
34
+ fi
35
+
36
+
37
+ # Copy the rest of the application
38
+ COPY . /workspace
39
+
40
+ EXPOSE 9871 9872 9873 9874 9880
41
+
42
+ CMD ["python", "webui.py"]
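A sketch of building and running the image defined above (assuming an NVIDIA container runtime is available for --gpus; the image tags are arbitrary). IMAGE_TYPE=elite skips the model downloads, otherwise the full set is baked in, and the published ports are the ones the Dockerfile exposes:

# Full image (pretrained / UVR5 / ASR models included)
docker build -t gpt-sovits:full .

# Elite image (no bundled models; mount or download them separately)
docker build --build-arg IMAGE_TYPE=elite -t gpt-sovits:elite .

# Run with GPU access and the exposed ports published
docker run --rm -it --gpus all \
    -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 -p 9880:9880 \
    gpt-sovits:full
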
GPT_SoVITS/.ipynb_checkpoints/inference_webui-checkpoint.py ADDED
@@ -0,0 +1,908 @@
1
+ '''
2
+ 按中英混合识别
3
+ 按日英混合识别
4
+ 多语种启动切分识别语种
5
+ 全部按中文识别
6
+ 全部按英文识别
7
+ 全部按日文识别
8
+ '''
9
+ import logging
10
+ import traceback,torchaudio,warnings
11
+ logging.getLogger("markdown_it").setLevel(logging.ERROR)
12
+ logging.getLogger("urllib3").setLevel(logging.ERROR)
13
+ logging.getLogger("httpcore").setLevel(logging.ERROR)
14
+ logging.getLogger("httpx").setLevel(logging.ERROR)
15
+ logging.getLogger("asyncio").setLevel(logging.ERROR)
16
+ logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
17
+ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
18
+ logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
19
+ warnings.simplefilter(action='ignore', category=FutureWarning)
20
+
21
+ import os, re, sys, json
22
+ import pdb
23
+ import torch
24
+ from text.LangSegmenter import LangSegmenter
25
+
26
+ try:
27
+ import gradio.analytics as analytics
28
+ analytics.version_check = lambda:None
29
+ except:...
30
+ version=model_version=os.environ.get("version","v2")
31
+ pretrained_sovits_name=["GPT_SoVITS/pretrained_models/s2G488k.pth", "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth","GPT_SoVITS/pretrained_models/s2Gv3.pth"]
32
+ pretrained_gpt_name=["GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt","GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1v3.ckpt"]
33
+
34
+
35
+ _ =[[],[]]
36
+ for i in range(3):
37
+ if os.path.exists(pretrained_gpt_name[i]):_[0].append(pretrained_gpt_name[i])
38
+ if os.path.exists(pretrained_sovits_name[i]):_[-1].append(pretrained_sovits_name[i])
39
+ pretrained_gpt_name,pretrained_sovits_name = _
40
+
41
+
42
+ if os.path.exists(f"./weight.json"):
43
+ pass
44
+ else:
45
+ with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
46
+
47
+ with open(f"./weight.json", 'r', encoding="utf-8") as file:
48
+ weight_data = file.read()
49
+ weight_data=json.loads(weight_data)
50
+ gpt_path = os.environ.get(
51
+ "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
52
+ sovits_path = os.environ.get(
53
+ "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
54
+ if isinstance(gpt_path,list):
55
+ gpt_path = gpt_path[0]
56
+ if isinstance(sovits_path,list):
57
+ sovits_path = sovits_path[0]
58
+
59
+ # gpt_path = os.environ.get(
60
+ # "gpt_path", pretrained_gpt_name
61
+ # )
62
+ # sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
63
+ cnhubert_base_path = os.environ.get(
64
+ "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
65
+ )
66
+ bert_path = os.environ.get(
67
+ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
68
+ )
69
+ infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
70
+ infer_ttswebui = int(infer_ttswebui)
71
+ is_share = os.environ.get("is_share", "False")
72
+ is_share = eval(is_share)
73
+ if "_CUDA_VISIBLE_DEVICES" in os.environ:
74
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
75
+ is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
76
+ punctuation = set(['!', '?', '…', ',', '.', '-'," "])
77
+ import gradio as gr
78
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
79
+ import numpy as np
80
+ import librosa
81
+ from feature_extractor import cnhubert
82
+
83
+ cnhubert.cnhubert_base_path = cnhubert_base_path
84
+
85
+ from GPT_SoVITS.module.models import SynthesizerTrn,SynthesizerTrnV3
86
+ from AR.models.t2s_lightning_module import Text2SemanticLightningModule
87
+ from text import cleaned_text_to_sequence
88
+ from text.cleaner import clean_text
89
+ from time import time as ttime
90
+ from module.mel_processing import spectrogram_torch
91
+ from tools.my_utils import load_audio
92
+ from tools.i18n.i18n import I18nAuto, scan_language_list
93
+
94
+ language=os.environ.get("language","Auto")
95
+ language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
96
+ i18n = I18nAuto(language=language)
97
+
98
+ # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。
99
+
100
+ if torch.cuda.is_available():
101
+ device = "cuda"
102
+ else:
103
+ device = "cpu"
104
+
105
+ dict_language_v1 = {
106
+ i18n("中文"): "all_zh",#全部按中文识别
107
+ i18n("英文"): "en",#全部按英文识别#######不变
108
+ i18n("日文"): "all_ja",#全部按日文识别
109
+ i18n("中英混合"): "zh",#按中英混合识别####不变
110
+ i18n("日英混合"): "ja",#按日英混合识别####不变
111
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
112
+ }
113
+ dict_language_v2 = {
114
+ i18n("中文"): "all_zh",#全部按中文识别
115
+ i18n("英文"): "en",#全部按英文识别#######不变
116
+ i18n("日文"): "all_ja",#全部按日文识别
117
+ i18n("粤语"): "all_yue",#全部按中文识别
118
+ i18n("韩文"): "all_ko",#全部按韩文识别
119
+ i18n("中英混合"): "zh",#按中英混合识别####不变
120
+ i18n("日英混合"): "ja",#按日英混合识别####不变
121
+ i18n("粤英混合"): "yue",#按粤英混合识别####不变
122
+ i18n("韩英混合"): "ko",#按韩英混合识别####不变
123
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
124
+ i18n("多语种混合(粤语)"): "auto_yue",#多语种启动切分识别语种
125
+ }
126
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
127
+
128
+ tokenizer = AutoTokenizer.from_pretrained(bert_path)
129
+ bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
130
+ if is_half == True:
131
+ bert_model = bert_model.half().to(device)
132
+ else:
133
+ bert_model = bert_model.to(device)
134
+
135
+
136
+ def get_bert_feature(text, word2ph):
137
+ with torch.no_grad():
138
+ inputs = tokenizer(text, return_tensors="pt")
139
+ for i in inputs:
140
+ inputs[i] = inputs[i].to(device)
141
+ res = bert_model(**inputs, output_hidden_states=True)
142
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
143
+ assert len(word2ph) == len(text)
144
+ phone_level_feature = []
145
+ for i in range(len(word2ph)):
146
+ repeat_feature = res[i].repeat(word2ph[i], 1)
147
+ phone_level_feature.append(repeat_feature)
148
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
149
+ return phone_level_feature.T
150
+
151
+
152
+ class DictToAttrRecursive(dict):
153
+ def __init__(self, input_dict):
154
+ super().__init__(input_dict)
155
+ for key, value in input_dict.items():
156
+ if isinstance(value, dict):
157
+ value = DictToAttrRecursive(value)
158
+ self[key] = value
159
+ setattr(self, key, value)
160
+
161
+ def __getattr__(self, item):
162
+ try:
163
+ return self[item]
164
+ except KeyError:
165
+ raise AttributeError(f"Attribute {item} not found")
166
+
167
+ def __setattr__(self, key, value):
168
+ if isinstance(value, dict):
169
+ value = DictToAttrRecursive(value)
170
+ super(DictToAttrRecursive, self).__setitem__(key, value)
171
+ super().__setattr__(key, value)
172
+
173
+ def __delattr__(self, item):
174
+ try:
175
+ del self[item]
176
+ except KeyError:
177
+ raise AttributeError(f"Attribute {item} not found")
178
+
179
+
180
+ ssl_model = cnhubert.get_model()
181
+ if is_half == True:
182
+ ssl_model = ssl_model.half().to(device)
183
+ else:
184
+ ssl_model = ssl_model.to(device)
185
+
186
+ resample_transform_dict={}
187
+ def resample(audio_tensor, sr0):
188
+ global resample_transform_dict
189
+ if sr0 not in resample_transform_dict:
190
+ resample_transform_dict[sr0] = torchaudio.transforms.Resample(
191
+ sr0, 24000
192
+ ).to(device)
193
+ return resample_transform_dict[sr0](audio_tensor)
194
+
195
+ def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
196
+ global vq_model, hps, version, model_version, dict_language
197
+ '''
198
+ v1:about 82942KB
199
+ half thr:82978KB
200
+ v2:about 83014KB
201
+ half thr:100MB
202
+ v1base:103490KB
203
+ half thr:103520KB
204
+ v2base:103551KB
205
+ v3:about 750MB
206
+
207
+ ~82978K~100M~103420~700M
208
+ v1-v2-v1base-v2base-v3
209
+ version:
210
+ symbols version and timbre_embedding version
211
+ model_version:
212
+ sovits is v1/2 (VITS) or v3 (shortcut CFM DiT)
213
+ '''
214
+ size=os.path.getsize(sovits_path)
215
+ if size<82978*1024:
216
+ model_version=version="v1"
217
+ elif size<100*1024*1024:
218
+ model_version=version="v2"
219
+ elif size<103520*1024:
220
+ model_version=version="v1"
221
+ elif size<700*1024*1024:
222
+ model_version = version = "v2"
223
+ else:
224
+ version = "v2"
225
+ model_version="v3"
226
+
227
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
228
+ if prompt_language is not None and text_language is not None:
229
+ if prompt_language in list(dict_language.keys()):
230
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
231
+ else:
232
+ prompt_text_update = {'__type__':'update', 'value':''}
233
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
234
+ if text_language in list(dict_language.keys()):
235
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
236
+ else:
237
+ text_update = {'__type__':'update', 'value':''}
238
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
239
+ if model_version=="v3":
240
+ visible_sample_steps=True
241
+ visible_inp_refs=False
242
+ else:
243
+ visible_sample_steps=False
244
+ visible_inp_refs=True
245
+ yield {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update,{"__type__": "update", "visible": visible_sample_steps},{"__type__": "update", "visible": visible_inp_refs},{"__type__": "update", "value": False,"interactive":True if model_version!="v3"else False}
246
+
247
+ dict_s2 = torch.load(sovits_path, map_location="cpu", weights_only=False)
248
+ hps = dict_s2["config"]
249
+ hps = DictToAttrRecursive(hps)
250
+ hps.model.semantic_frame_rate = "25hz"
251
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
252
+ hps.model.version = "v1"
253
+ else:
254
+ hps.model.version = "v2"
255
+ version=hps.model.version
256
+ # print("sovits版本:",hps.model.version)
257
+ if model_version!="v3":
258
+ vq_model = SynthesizerTrn(
259
+ hps.data.filter_length // 2 + 1,
260
+ hps.train.segment_size // hps.data.hop_length,
261
+ n_speakers=hps.data.n_speakers,
262
+ **hps.model
263
+ )
264
+ model_version=version
265
+ else:
266
+ vq_model = SynthesizerTrnV3(
267
+ hps.data.filter_length // 2 + 1,
268
+ hps.train.segment_size // hps.data.hop_length,
269
+ n_speakers=hps.data.n_speakers,
270
+ **hps.model
271
+ )
272
+ if ("pretrained" not in sovits_path):
273
+ try:
274
+ del vq_model.enc_q
275
+ except:pass
276
+ if is_half == True:
277
+ vq_model = vq_model.half().to(device)
278
+ else:
279
+ vq_model = vq_model.to(device)
280
+ vq_model.eval()
281
+ print("loading sovits_%s"%model_version,vq_model.load_state_dict(dict_s2["weight"], strict=False))
282
+ with open("./weight.json")as f:
283
+ data=f.read()
284
+ data=json.loads(data)
285
+ data["SoVITS"][version]=sovits_path
286
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
287
+
288
+
289
+ try:next(change_sovits_weights(sovits_path))
290
+ except:pass
291
+
292
+ def change_gpt_weights(gpt_path):
293
+ global hz, max_sec, t2s_model, config
294
+ hz = 50
295
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
296
+ config = dict_s1["config"]
297
+ max_sec = config["data"]["max_sec"]
298
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
299
+ t2s_model.load_state_dict(dict_s1["weight"])
300
+ if is_half == True:
301
+ t2s_model = t2s_model.half()
302
+ t2s_model = t2s_model.to(device)
303
+ t2s_model.eval()
304
+ # total = sum([param.nelement() for param in t2s_model.parameters()])
305
+ # print("Number of parameter: %.2fM" % (total / 1e6))
306
+ with open("./weight.json")as f:
307
+ data=f.read()
308
+ data=json.loads(data)
309
+ data["GPT"][version]=gpt_path
310
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
311
+
312
+
313
+ change_gpt_weights(gpt_path)
314
+ os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
315
+ import torch,soundfile
316
+ now_dir = os.getcwd()
317
+ import soundfile
318
+
319
+ def init_bigvgan():
320
+ global model
321
+ from BigVGAN import bigvgan
322
+ model = bigvgan.BigVGAN.from_pretrained("%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), use_cuda_kernel=False) # if True, RuntimeError: Ninja is required to load C++ extensions
323
+ # remove weight norm in the model and set to eval mode
324
+ model.remove_weight_norm()
325
+ model = model.eval()
326
+ if is_half == True:
327
+ model = model.half().to(device)
328
+ else:
329
+ model = model.to(device)
330
+
331
+ if model_version!="v3":model=None
332
+ else:init_bigvgan()
333
+
334
+
335
+ def get_spepc(hps, filename):
336
+ audio = load_audio(filename, int(hps.data.sampling_rate))
337
+ audio = torch.FloatTensor(audio)
338
+ maxx=audio.abs().max()
339
+ if(maxx>1):audio/=min(2,maxx)
340
+ audio_norm = audio
341
+ audio_norm = audio_norm.unsqueeze(0)
342
+ spec = spectrogram_torch(
343
+ audio_norm,
344
+ hps.data.filter_length,
345
+ hps.data.sampling_rate,
346
+ hps.data.hop_length,
347
+ hps.data.win_length,
348
+ center=False,
349
+ )
350
+ return spec
351
+
352
+ def clean_text_inf(text, language, version):
353
+ phones, word2ph, norm_text = clean_text(text, language, version)
354
+ phones = cleaned_text_to_sequence(phones, version)
355
+ return phones, word2ph, norm_text
356
+
357
+ dtype=torch.float16 if is_half == True else torch.float32
358
+ def get_bert_inf(phones, word2ph, norm_text, language):
359
+ language=language.replace("all_","")
360
+ if language == "zh":
361
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
362
+ else:
363
+ bert = torch.zeros(
364
+ (1024, len(phones)),
365
+ dtype=torch.float16 if is_half == True else torch.float32,
366
+ ).to(device)
367
+
368
+ return bert
369
+
370
+
371
+ splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
372
+
373
+
374
+ def get_first(text):
375
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
376
+ text = re.split(pattern, text)[0].strip()
377
+ return text
378
+
379
+ from text import chinese
380
+ def get_phones_and_bert(text,language,version,final=False):
381
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
382
+ language = language.replace("all_","")
383
+ if language == "en":
384
+ formattext = text
385
+ else:
386
+ # 因无法区别中日韩文汉字,以用户输入为准
387
+ formattext = text
388
+ while " " in formattext:
389
+ formattext = formattext.replace(" ", " ")
390
+ if language == "zh":
391
+ if re.search(r'[A-Za-z]', formattext):
392
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
393
+ formattext = chinese.mix_text_normalize(formattext)
394
+ return get_phones_and_bert(formattext,"zh",version)
395
+ else:
396
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
397
+ bert = get_bert_feature(norm_text, word2ph).to(device)
398
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
399
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
400
+ formattext = chinese.mix_text_normalize(formattext)
401
+ return get_phones_and_bert(formattext,"yue",version)
402
+ else:
403
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
404
+ bert = torch.zeros(
405
+ (1024, len(phones)),
406
+ dtype=torch.float16 if is_half == True else torch.float32,
407
+ ).to(device)
408
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
409
+ textlist=[]
410
+ langlist=[]
411
+ if language == "auto":
412
+ for tmp in LangSegmenter.getTexts(text):
413
+ langlist.append(tmp["lang"])
414
+ textlist.append(tmp["text"])
415
+ elif language == "auto_yue":
416
+ for tmp in LangSegmenter.getTexts(text):
417
+ if tmp["lang"] == "zh":
418
+ tmp["lang"] = "yue"
419
+ langlist.append(tmp["lang"])
420
+ textlist.append(tmp["text"])
421
+ else:
422
+ for tmp in LangSegmenter.getTexts(text):
423
+ if tmp["lang"] == "en":
424
+ langlist.append(tmp["lang"])
425
+ else:
426
+ # 因无法区别中日韩文汉字,以用户输入为准
427
+ langlist.append(language)
428
+ textlist.append(tmp["text"])
429
+ print(textlist)
430
+ print(langlist)
431
+ phones_list = []
432
+ bert_list = []
433
+ norm_text_list = []
434
+ for i in range(len(textlist)):
435
+ lang = langlist[i]
436
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
437
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
438
+ phones_list.append(phones)
439
+ norm_text_list.append(norm_text)
440
+ bert_list.append(bert)
441
+ bert = torch.cat(bert_list, dim=1)
442
+ phones = sum(phones_list, [])
443
+ norm_text = ''.join(norm_text_list)
444
+
445
+ if not final and len(phones) < 6:
446
+ return get_phones_and_bert("." + text,language,version,final=True)
447
+
448
+ return phones,bert.to(dtype),norm_text
449
+
450
+ from module.mel_processing import spectrogram_torch,spec_to_mel_torch
451
+ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
452
+ spec=spectrogram_torch(y,n_fft,sampling_rate,hop_size,win_size,center)
453
+ mel=spec_to_mel_torch(spec,n_fft,num_mels,sampling_rate,fmin,fmax)
454
+ return mel
455
+ mel_fn_args = {
456
+ "n_fft": 1024,
457
+ "win_size": 1024,
458
+ "hop_size": 256,
459
+ "num_mels": 100,
460
+ "sampling_rate": 24000,
461
+ "fmin": 0,
462
+ "fmax": None,
463
+ "center": False
464
+ }
465
+
466
+ spec_min = -12
467
+ spec_max = 2
468
+ def norm_spec(x):
469
+ return (x - spec_min) / (spec_max - spec_min) * 2 - 1
470
+ def denorm_spec(x):
471
+ return (x + 1) / 2 * (spec_max - spec_min) + spec_min
472
+ mel_fn=lambda x: mel_spectrogram(x, **mel_fn_args)
473
+
474
+
475
+ def merge_short_text_in_array(texts, threshold):
476
+ if (len(texts)) < 2:
477
+ return texts
478
+ result = []
479
+ text = ""
480
+ for ele in texts:
481
+ text += ele
482
+ if len(text) >= threshold:
483
+ result.append(text)
484
+ text = ""
485
+ if (len(text) > 0):
486
+ if len(result) == 0:
487
+ result.append(text)
488
+ else:
489
+ result[len(result) - 1] += text
490
+ return result
491
+
492
+ ##ref_wav_path+prompt_text+prompt_language+text(单个)+text_language+top_k+top_p+temperature
493
+ # cache_tokens={}#暂未实现清理机制
494
+ cache= {}
495
+ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=None,sample_steps=8):
496
+ global cache
497
+ if ref_wav_path:pass
498
+ else:gr.Warning(i18n('请上传参考音频'))
499
+ if text:pass
500
+ else:gr.Warning(i18n('请填入推理文本'))
501
+ t = []
502
+ if prompt_text is None or len(prompt_text) == 0:
503
+ ref_free = True
504
+ if model_version=="v3":ref_free=False#s2v3暂不支持ref_free
505
+ t0 = ttime()
506
+ prompt_language = dict_language[prompt_language]
507
+ text_language = dict_language[text_language]
508
+
509
+
510
+ if not ref_free:
511
+ prompt_text = prompt_text.strip("\n")
512
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
513
+ print(i18n("实际输入的参考文本:"), prompt_text)
514
+ text = text.strip("\n")
515
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
516
+
517
+ print(i18n("实际输入的目标文本:"), text)
518
+ zero_wav = np.zeros(
519
+ int(hps.data.sampling_rate * 0.3),
520
+ dtype=np.float16 if is_half == True else np.float32,
521
+ )
522
+ if not ref_free:
523
+ with torch.no_grad():
524
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
525
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
526
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
527
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
528
+ wav16k = torch.from_numpy(wav16k)
529
+ zero_wav_torch = torch.from_numpy(zero_wav)
530
+ if is_half == True:
531
+ wav16k = wav16k.half().to(device)
532
+ zero_wav_torch = zero_wav_torch.half().to(device)
533
+ else:
534
+ wav16k = wav16k.to(device)
535
+ zero_wav_torch = zero_wav_torch.to(device)
536
+ wav16k = torch.cat([wav16k, zero_wav_torch])
537
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
538
+ "last_hidden_state"
539
+ ].transpose(
540
+ 1, 2
541
+ ) # .float()
542
+ codes = vq_model.extract_latent(ssl_content)
543
+ prompt_semantic = codes[0, 0]
544
+ prompt = prompt_semantic.unsqueeze(0).to(device)
545
+
546
+ t1 = ttime()
547
+ t.append(t1-t0)
548
+
549
+ if (how_to_cut == i18n("凑四句一切")):
550
+ text = cut1(text)
551
+ elif (how_to_cut == i18n("凑50字一切")):
552
+ text = cut2(text)
553
+ elif (how_to_cut == i18n("按中文句号。切")):
554
+ text = cut3(text)
555
+ elif (how_to_cut == i18n("按英文句号.切")):
556
+ text = cut4(text)
557
+ elif (how_to_cut == i18n("按标点符号切")):
558
+ text = cut5(text)
559
+ while "\n\n" in text:
560
+ text = text.replace("\n\n", "\n")
561
+ print(i18n("实际输入的目标文本(切句后):"), text)
562
+ texts = text.split("\n")
563
+ texts = process_text(texts)
564
+ texts = merge_short_text_in_array(texts, 5)
565
+ audio_opt = []
566
+ ###s2v3暂不支持ref_free
567
+ if not ref_free:
568
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
569
+
570
+ for i_text,text in enumerate(texts):
571
+ # 解决输入目标文本的空行导致报错的问题
572
+ if (len(text.strip()) == 0):
573
+ continue
574
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
575
+ print(i18n("实际输入的目标文本(每句):"), text)
576
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
577
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
578
+ if not ref_free:
579
+ bert = torch.cat([bert1, bert2], 1)
580
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
581
+ else:
582
+ bert = bert2
583
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
584
+
585
+ bert = bert.to(device).unsqueeze(0)
586
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
587
+
588
+ t2 = ttime()
589
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
590
+ # print(cache.keys(),if_freeze)
591
+ if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
592
+ else:
593
+ with torch.no_grad():
594
+ pred_semantic, idx = t2s_model.model.infer_panel(
595
+ all_phoneme_ids,
596
+ all_phoneme_len,
597
+ None if ref_free else prompt,
598
+ bert,
599
+ # prompt_phone_len=ph_offset,
600
+ top_k=top_k,
601
+ top_p=top_p,
602
+ temperature=temperature,
603
+ early_stop_num=hz * max_sec,
604
+ )
605
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
606
+ cache[i_text]=pred_semantic
607
+ t3 = ttime()
608
+ ###v3不存在以下逻辑和inp_refs
609
+ if model_version!="v3":
610
+ refers=[]
611
+ if(inp_refs):
612
+ for path in inp_refs:
613
+ try:
614
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
615
+ refers.append(refer)
616
+ except:
617
+ traceback.print_exc()
618
+ if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
619
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
620
+ else:
621
+ refer = get_spepc(hps, ref_wav_path).to(device).to(dtype)#######这里要重采样切到32k,因为src是24k的,没有单独的32k的src,所以不能改成2个路径
622
+ phoneme_ids0=torch.LongTensor(phones1).to(device).unsqueeze(0)
623
+ phoneme_ids1=torch.LongTensor(phones2).to(device).unsqueeze(0)
624
+ fea_ref,ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer)
625
+ ref_audio, sr = torchaudio.load(ref_wav_path)
626
+ ref_audio=ref_audio.to(device).float()
627
+ if (ref_audio.shape[0] == 2):
628
+ ref_audio = ref_audio.mean(0).unsqueeze(0)
629
+ if sr!=24000:
630
+ ref_audio=resample(ref_audio,sr)
631
+ mel2 = mel_fn(ref_audio.to(dtype))
632
+ mel2 = norm_spec(mel2)
633
+ T_min = min(mel2.shape[2], fea_ref.shape[2])
634
+ mel2 = mel2[:, :, :T_min]
635
+ fea_ref = fea_ref[:, :, :T_min]
636
+ if (T_min > 468):
637
+ mel2 = mel2[:, :, -468:]
638
+ fea_ref = fea_ref[:, :, -468:]
639
+ T_min = 468
640
+ chunk_len = 934 - T_min
641
+ fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge)
642
+ cfm_resss = []
643
+ idx = 0
644
+ while (1):
645
+ fea_todo_chunk = fea_todo[:, :, idx:idx + chunk_len]
646
+ if (fea_todo_chunk.shape[-1] == 0): break
647
+ idx += chunk_len
648
+ fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
649
+ cfm_res = vq_model.cfm.inference(fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0)
650
+ cfm_res = cfm_res[:, :, mel2.shape[2]:]
651
+ mel2 = cfm_res[:, :, -T_min:]
652
+ fea_ref = fea_todo_chunk[:, :, -T_min:]
653
+ cfm_resss.append(cfm_res)
654
+ cmf_res = torch.cat(cfm_resss, 2)
655
+ cmf_res = denorm_spec(cmf_res)
656
+ if model==None:init_bigvgan()
657
+ with torch.inference_mode():
658
+ wav_gen = model(cmf_res)
659
+ audio=wav_gen[0][0].cpu().detach().numpy()
660
+ max_audio=np.abs(audio).max()#简单防止16bit爆音
661
+ if max_audio>1:audio/=max_audio
662
+ audio_opt.append(audio)
663
+ audio_opt.append(zero_wav)
664
+ t4 = ttime()
665
+ t.extend([t2 - t1,t3 - t2, t4 - t3])
666
+ t1 = ttime()
667
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
668
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
669
+ )
670
+ sr=hps.data.sampling_rate if model_version!="v3"else 24000
671
+ yield sr, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16)
672
+
673
+
674
+ def split(todo_text):
675
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
676
+ if todo_text[-1] not in splits:
677
+ todo_text += "。"
678
+ i_split_head = i_split_tail = 0
679
+ len_text = len(todo_text)
680
+ todo_texts = []
681
+ while 1:
682
+ if i_split_head >= len_text:
683
+ break # 结尾一定有标点,所以直接跳出即可,最后一段在上次已加入
684
+ if todo_text[i_split_head] in splits:
685
+ i_split_head += 1
686
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
687
+ i_split_tail = i_split_head
688
+ else:
689
+ i_split_head += 1
690
+ return todo_texts
691
+
692
+
693
+ def cut1(inp):
694
+ inp = inp.strip("\n")
695
+ inps = split(inp)
696
+ split_idx = list(range(0, len(inps), 4))
697
+ split_idx[-1] = None
698
+ if len(split_idx) > 1:
699
+ opts = []
700
+ for idx in range(len(split_idx) - 1):
701
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
702
+ else:
703
+ opts = [inp]
704
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
705
+ return "\n".join(opts)
706
+
707
+
708
+ def cut2(inp):
709
+ inp = inp.strip("\n")
710
+ inps = split(inp)
711
+ if len(inps) < 2:
712
+ return inp
713
+ opts = []
714
+ summ = 0
715
+ tmp_str = ""
716
+ for i in range(len(inps)):
717
+ summ += len(inps[i])
718
+ tmp_str += inps[i]
719
+ if summ > 50:
720
+ summ = 0
721
+ opts.append(tmp_str)
722
+ tmp_str = ""
723
+ if tmp_str != "":
724
+ opts.append(tmp_str)
725
+ # print(opts)
726
+ if len(opts) > 1 and len(opts[-1]) < 50: ##如果最后一个太短了,和前一个合一起
727
+ opts[-2] = opts[-2] + opts[-1]
728
+ opts = opts[:-1]
729
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
730
+ return "\n".join(opts)
731
+
732
+
733
+ def cut3(inp):
734
+ inp = inp.strip("\n")
735
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
736
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
737
+ return "\n".join(opts)
738
+
739
+ def cut4(inp):
740
+ inp = inp.strip("\n")
741
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
742
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
743
+ return "\n".join(opts)
744
+
745
+
746
+ # contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
747
+ def cut5(inp):
748
+ inp = inp.strip("\n")
749
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
750
+ mergeitems = []
751
+ items = []
752
+
753
+ for i, char in enumerate(inp):
754
+ if char in punds:
755
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
756
+ items.append(char)
757
+ else:
758
+ items.append(char)
759
+ mergeitems.append("".join(items))
760
+ items = []
761
+ else:
762
+ items.append(char)
763
+
764
+ if items:
765
+ mergeitems.append("".join(items))
766
+
767
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
768
+ return "\n".join(opt)
769
+
770
+
771
+ def custom_sort_key(s):
772
+ # 使用正则表达式提取字符串中的数字部分和非数字部分
773
+ parts = re.split('(\d+)', s)
774
+ # 将数字部分转换为整数,非数字部分保持不变
775
+ parts = [int(part) if part.isdigit() else part for part in parts]
776
+ return parts
777
+
778
+ def process_text(texts):
779
+ _text=[]
780
+ if all(text in [None, " ", "\n",""] for text in texts):
781
+ raise ValueError(i18n("请输入有效文本"))
782
+ for text in texts:
783
+ if text in [None, " ", ""]:
784
+ pass
785
+ else:
786
+ _text.append(text)
787
+ return _text
788
+
789
+
790
+ def change_choices():
791
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
792
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
793
+
794
+
795
+ SoVITS_weight_root=["SoVITS_weights","SoVITS_weights_v2","SoVITS_weights_v3"]
796
+ GPT_weight_root=["GPT_weights","GPT_weights_v2","GPT_weights_v3"]
797
+ for path in SoVITS_weight_root+GPT_weight_root:
798
+ os.makedirs(path,exist_ok=True)
799
+
800
+
801
+ def get_weights_names(GPT_weight_root, SoVITS_weight_root):
802
+ SoVITS_names = [i for i in pretrained_sovits_name]
803
+ for path in SoVITS_weight_root:
804
+ for name in os.listdir(path):
805
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
806
+ GPT_names = [i for i in pretrained_gpt_name]
807
+ for path in GPT_weight_root:
808
+ for name in os.listdir(path):
809
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
810
+ return SoVITS_names, GPT_names
811
+
812
+
813
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
814
+
815
+ def html_center(text, label='p'):
816
+ return f"""<div style="text-align: center; margin: 100; padding: 50;">
817
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
818
+ </div>"""
819
+
820
+ def html_left(text, label='p'):
821
+ return f"""<div style="text-align: left; margin: 0; padding: 0;">
822
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
823
+ </div>"""
824
+
825
+
826
+ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
827
+ gr.Markdown(
828
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
829
+ )
830
+ with gr.Group():
831
+ gr.Markdown(html_center(i18n("模型切换"),'h3'))
832
+ with gr.Row():
833
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
834
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
835
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
836
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
837
+ gr.Markdown(html_center(i18n("*请上传并填写参考信息"),'h3'))
838
+ with gr.Row():
839
+ inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
840
+ with gr.Column(scale=13):
841
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。v3暂不支持该模式,使用了会报错。"), value=False, interactive=True, show_label=True,scale=1)
842
+ gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开。<br>开启后无视填写的参考文本。")))
843
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5,scale=1)
844
+ with gr.Column(scale=14):
845
+ prompt_language = gr.Dropdown(
846
+ label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
847
+ )
848
+ inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple")if model_version!="v3"else gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple",visible=False)
849
+ sample_steps = gr.Radio(label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"),value=32,choices=[4,8,16,32],visible=True)if model_version=="v3"else gr.Radio(label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"),value=8,choices=[4,8,16,32],visible=False)
850
+ gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
851
+ with gr.Row():
852
+ with gr.Column(scale=13):
853
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
854
+ with gr.Column(scale=7):
855
+ text_language = gr.Dropdown(
856
+ label=i18n("需要合成的语种")+i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
857
+ )
858
+ how_to_cut = gr.Dropdown(
859
+ label=i18n("怎么切"),
860
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
861
+ value=i18n("凑四句一切"),
862
+ interactive=True, scale=1
863
+ )
864
+ gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
865
+ if_freeze=gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True,show_label=True, scale=1)
866
+ speed = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label=i18n("语速"),value=1,interactive=True, scale=1)
867
+ gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
868
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k"),value=15,interactive=True, scale=1)
869
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p"),value=1,interactive=True, scale=1)
870
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature"),value=1,interactive=True, scale=1)
871
+ # with gr.Column():
872
+ # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
873
+ # phoneme=gr.Textbox(label=i18n("音素框"), value="")
874
+ # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
875
+ with gr.Row():
876
+ inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
877
+ output = gr.Audio(label=i18n("输出的语音"), scale=14)
878
+
879
+ inference_button.click(
880
+ get_tts_wav,
881
+ [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,sample_steps],
882
+ [output],
883
+ )
884
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language,sample_steps,inp_refs,ref_text_free])
885
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
886
+
887
+ # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
888
+ # with gr.Row():
889
+ # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
890
+ # button1 = gr.Button(i18n("凑四句一切"), variant="primary")
891
+ # button2 = gr.Button(i18n("凑50字一切"), variant="primary")
892
+ # button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
893
+ # button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
894
+ # button5 = gr.Button(i18n("按标点符号切"), variant="primary")
895
+ # text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
896
+ # button1.click(cut1, [text_inp], [text_opt])
897
+ # button2.click(cut2, [text_inp], [text_opt])
898
+ # button3.click(cut3, [text_inp], [text_opt])
899
+ # button4.click(cut4, [text_inp], [text_opt])
900
+ # button5.click(cut5, [text_inp], [text_opt])
901
+ # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
902
+
903
+ if __name__ == '__main__':
904
+ app.queue().launch(#concurrency_count=511, max_size=1022
905
+ inbrowser=True,
906
+ share=True,
907
+ quiet=True,
908
+ )
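For reference, the weight.json file that the inference WebUI script above creates and rewrites on every model switch is just a two-key map from model type to the last path loaded per version; after picking a v2 GPT/SoVITS pair it would look roughly like this (the file names are placeholders):

{
  "GPT":    { "v2": "GPT_weights_v2/my_voice-e15.ckpt" },
  "SoVITS": { "v2": "SoVITS_weights_v2/my_voice_e8_s200.pth" }
}
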
GPT_SoVITS/AR/__init__.py ADDED
File without changes
GPT_SoVITS/AR/data/__init__.py ADDED
File without changes
GPT_SoVITS/AR/data/bucket_sampler.py ADDED
@@ -0,0 +1,163 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import itertools
4
+ import math
5
+ import random
6
+ from random import shuffle
7
+ from typing import Iterator
8
+ from typing import Optional
9
+ from typing import TypeVar
10
+
11
+ import torch
12
+ import torch.distributed as dist
13
+ from torch.utils.data import Dataset
14
+ from torch.utils.data import Sampler
15
+
16
+ __all__ = [
17
+ "DistributedBucketSampler",
18
+ ]
19
+
20
+ T_co = TypeVar("T_co", covariant=True)
21
+
22
+
23
+ class DistributedBucketSampler(Sampler[T_co]):
24
+ r"""
25
+ sort the dataset wrt. input length
26
+ divide samples into buckets
27
+ sort within buckets
28
+ divide buckets into batches
29
+ sort batches
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ dataset: Dataset,
35
+ num_replicas: Optional[int] = None,
36
+ rank: Optional[int] = None,
37
+ shuffle: bool = True,
38
+ seed: int = 0,
39
+ drop_last: bool = False,
40
+ batch_size: int = 32,
41
+ ) -> None:
42
+ if num_replicas is None:
43
+ if not dist.is_available():
44
+ raise RuntimeError("Requires distributed package to be available")
45
+ num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1
46
+ if rank is None:
47
+ if not dist.is_available():
48
+ raise RuntimeError("Requires distributed package to be available")
49
+ rank = dist.get_rank() if torch.cuda.is_available() else 0
50
+ if torch.cuda.is_available():
51
+ torch.cuda.set_device(rank)
52
+ if rank >= num_replicas or rank < 0:
53
+ raise ValueError(
54
+ "Invalid rank {}, rank should be in the interval"
55
+ " [0, {}]".format(rank, num_replicas - 1)
56
+ )
57
+ self.dataset = dataset
58
+ self.num_replicas = num_replicas
59
+ self.rank = rank
60
+ self.epoch = 0
61
+ self.drop_last = drop_last
62
+ # If the dataset length is evenly divisible by # of replicas, then there
63
+ # is no need to drop any data, since the dataset will be split equally.
64
+ if (
65
+ self.drop_last and len(self.dataset) % self.num_replicas != 0
66
+ ): # type: ignore[arg-type]
67
+ # Split to nearest available length that is evenly divisible.
68
+ # This is to ensure each rank receives the same amount of data when
69
+ # using this Sampler.
70
+ self.num_samples = math.ceil(
71
+ (len(self.dataset) - self.num_replicas)
72
+ / self.num_replicas # type: ignore[arg-type]
73
+ )
74
+ else:
75
+ self.num_samples = math.ceil(
76
+ len(self.dataset) / self.num_replicas
77
+ ) # type: ignore[arg-type]
78
+ self.total_size = self.num_samples * self.num_replicas
79
+ self.shuffle = shuffle
80
+ self.seed = seed
81
+ self.batch_size = batch_size
82
+ self.id_with_length = self._get_sample_lengths()
83
+ self.id_buckets = self.make_buckets(bucket_width=2.0)
84
+
85
+ def _get_sample_lengths(self):
86
+ id_with_lengths = []
87
+ for i in range(len(self.dataset)):
88
+ id_with_lengths.append((i, self.dataset.get_sample_length(i)))
89
+ id_with_lengths.sort(key=lambda x: x[1])
90
+ return id_with_lengths
91
+
92
+ def make_buckets(self, bucket_width: float = 2.0):
93
+ buckets = []
94
+ cur = []
95
+ max_sec = bucket_width
96
+ for id, sec in self.id_with_length:
97
+ if sec < max_sec:
98
+ cur.append(id)
99
+ else:
100
+ buckets.append(cur)
101
+ cur = [id]
102
+ max_sec += bucket_width
103
+ if len(cur) > 0:
104
+ buckets.append(cur)
105
+ return buckets
106
+
107
+ def __iter__(self) -> Iterator[T_co]:
108
+ if self.shuffle:
109
+ # deterministically shuffle based on epoch and seed
110
+ g = torch.Generator()
111
+ g.manual_seed(self.seed + self.epoch)
112
+ random.seed(self.epoch + self.seed)
113
+ shuffled_bucket = []
114
+ for buc in self.id_buckets:
115
+ buc_copy = buc.copy()
116
+ shuffle(buc_copy)
117
+ shuffled_bucket.append(buc_copy)
118
+ grouped_batch_size = self.batch_size * self.num_replicas
119
+ shuffled_bucket = list(itertools.chain(*shuffled_bucket))
120
+ n_batch = int(math.ceil(len(shuffled_bucket) / grouped_batch_size))
121
+ batches = [
122
+ shuffled_bucket[b * grouped_batch_size : (b + 1) * grouped_batch_size]
123
+ for b in range(n_batch)
124
+ ]
125
+ shuffle(batches)
126
+ indices = list(itertools.chain(*batches))
127
+ else:
128
+ # type: ignore[arg-type]
129
+ indices = list(range(len(self.dataset)))
130
+
131
+ if not self.drop_last:
132
+ # add extra samples to make it evenly divisible
133
+ padding_size = self.total_size - len(indices)
134
+ if padding_size <= len(indices):
135
+ indices += indices[:padding_size]
136
+ else:
137
+ indices += (indices * math.ceil(padding_size / len(indices)))[
138
+ :padding_size
139
+ ]
140
+ else:
141
+ # remove tail of data to make it evenly divisible.
142
+ indices = indices[: self.total_size]
143
+ assert len(indices) == self.total_size
144
+
145
+ # subsample
146
+ indices = indices[self.rank : self.total_size : self.num_replicas]
147
+ assert len(indices) == self.num_samples
148
+
149
+ return iter(indices)
150
+
151
+ def __len__(self) -> int:
152
+ return self.num_samples
153
+
154
+ def set_epoch(self, epoch: int) -> None:
155
+ r"""
156
+ Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
157
+ use a different random ordering for each epoch. Otherwise, the next iteration of this
158
+ sampler will yield the same ordering.
159
+
160
+ Args:
161
+ epoch (int): Epoch number.
162
+ """
163
+ self.epoch = epoch
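DistributedBucketSampler only requires the wrapped dataset to implement get_sample_length(idx) (duration in seconds); it sorts items by length, groups them into buckets, shuffles within buckets each epoch, and pads/subsamples so every rank receives num_samples indices. A toy single-process sketch (the dataset and durations are made up; it assumes GPT_SoVITS/ is on the import path, as in the repo's own modules):

# Toy usage: one replica, no torch.distributed initialisation needed because
# num_replicas and rank are passed explicitly.
import random
from torch.utils.data import Dataset
from AR.data.bucket_sampler import DistributedBucketSampler

class ToyDurationDataset(Dataset):
    def __init__(self, n=100):
        self.durations = [random.uniform(1.0, 10.0) for _ in range(n)]
    def __len__(self):
        return len(self.durations)
    def __getitem__(self, idx):
        return idx
    def get_sample_length(self, idx):  # the only extra hook the sampler needs
        return self.durations[idx]

ds = ToyDurationDataset()
sampler = DistributedBucketSampler(ds, num_replicas=1, rank=0, batch_size=8)
sampler.set_epoch(0)             # reshuffle deterministically per epoch
indices = list(iter(sampler))
print(len(indices), indices[:8])
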
GPT_SoVITS/AR/data/data_module.py ADDED
@@ -0,0 +1,76 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/data_module.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ from pytorch_lightning import LightningDataModule
4
+ from AR.data.bucket_sampler import DistributedBucketSampler
5
+ from AR.data.dataset import Text2SemanticDataset
6
+ from torch.utils.data import DataLoader
7
+
8
+
9
+ class Text2SemanticDataModule(LightningDataModule):
10
+ def __init__(
11
+ self,
12
+ config,
13
+ train_semantic_path,
14
+ train_phoneme_path,
15
+ dev_semantic_path=None,
16
+ dev_phoneme_path=None,
17
+ ):
18
+ super().__init__()
19
+ self.config = config
20
+ self.train_semantic_path = train_semantic_path
21
+ self.train_phoneme_path = train_phoneme_path
22
+ self.dev_semantic_path = dev_semantic_path
23
+ self.dev_phoneme_path = dev_phoneme_path
24
+ self.num_workers = self.config["data"]["num_workers"]
25
+
26
+ def prepare_data(self):
27
+ pass
28
+
29
+ def setup(self, stage=None, output_logs=False):
30
+ self._train_dataset = Text2SemanticDataset(
31
+ phoneme_path=self.train_phoneme_path,
32
+ semantic_path=self.train_semantic_path,
33
+ max_sec=self.config["data"]["max_sec"],
34
+ pad_val=self.config["data"]["pad_val"],
35
+ )
36
+ self._dev_dataset = self._train_dataset
37
+ # self._dev_dataset = Text2SemanticDataset(
38
+ # phoneme_path=self.dev_phoneme_path,
39
+ # semantic_path=self.dev_semantic_path,
40
+ # max_sample=self.config['data']['max_eval_sample'],
41
+ # max_sec=self.config['data']['max_sec'],
42
+ # pad_val=self.config['data']['pad_val'])
43
+
44
+ def train_dataloader(self):
45
+ batch_size=self.config["train"]["batch_size"]//2 if self.config["train"].get("if_dpo",False)==True else self.config["train"]["batch_size"]
46
+ batch_size = max(min(batch_size,len(self._train_dataset)//4),1)#防止不保存
47
+ sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size)
48
+ return DataLoader(
49
+ self._train_dataset,
50
+ batch_size=batch_size,
51
+ sampler=sampler,
52
+ collate_fn=self._train_dataset.collate,
53
+ num_workers=self.num_workers,
54
+ persistent_workers=True,
55
+ prefetch_factor=16,
56
+ )
57
+
58
+ def val_dataloader(self):
59
+ return DataLoader(
60
+ self._dev_dataset,
61
+ batch_size=1,
62
+ shuffle=False,
63
+ collate_fn=self._train_dataset.collate,
64
+ num_workers=max(self.num_workers, 12),
65
+ persistent_workers=True,
66
+ prefetch_factor=16,
67
+ )
68
+
69
+ # 这个会使用到嘛?
70
+ def test_dataloader(self):
71
+ return DataLoader(
72
+ self._dev_dataset,
73
+ batch_size=1,
74
+ shuffle=False,
75
+ collate_fn=self._train_dataset.collate,
76
+ )
GPT_SoVITS/AR/data/dataset.py ADDED
@@ -0,0 +1,323 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/dataset.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import pdb
4
+ import sys
5
+
6
+ # sys.path.append("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert")
7
+ import traceback, os
8
+ from typing import Dict
9
+ from typing import List
10
+
11
+ import numpy as np
12
+ import pandas as pd
13
+ import torch, json
14
+ from torch.utils.data import DataLoader
15
+ from torch.utils.data import Dataset
16
+ from transformers import AutoTokenizer
17
+
18
+ version = os.environ.get('version',None)
19
+
20
+ from text import cleaned_text_to_sequence
21
+
22
+ # from config import exp_dir
23
+
24
+
25
+ def batch_sequences(sequences: List[np.array], axis: int = 0, pad_value: int = 0):
26
+ seq = sequences[0]
27
+ ndim = seq.ndim
28
+ if axis < 0:
29
+ axis += ndim
30
+ dtype = seq.dtype
31
+ pad_value = dtype.type(pad_value)
32
+ seq_lengths = [seq.shape[axis] for seq in sequences]
33
+ max_length = np.max(seq_lengths)
34
+
35
+ padded_sequences = []
36
+ for seq, length in zip(sequences, seq_lengths):
37
+ padding = (
38
+ [(0, 0)] * axis + [(0, max_length - length)] + [(0, 0)] * (ndim - axis - 1)
39
+ )
40
+ padded_seq = np.pad(seq, padding, mode="constant", constant_values=pad_value)
41
+ padded_sequences.append(padded_seq)
42
+ batch = np.stack(padded_sequences)
43
+ return batch
44
+
45
+
46
+ class Text2SemanticDataset(Dataset):
47
+ """dataset class for text tokens to semantic model training."""
48
+
49
+ def __init__(
50
+ self,
51
+ phoneme_path: str,
52
+ semantic_path: str,
53
+ max_sample: int = None,
54
+ max_sec: int = 100,
55
+ pad_val: int = 1024,
56
+ # min value of phoneme/sec
57
+ min_ps_ratio: int = 3,
58
+ # max value of phoneme/sec
59
+ max_ps_ratio: int = 25,
60
+ ) -> None:
61
+ super().__init__()
62
+
63
+ self.semantic_data = pd.read_csv(
64
+ semantic_path, delimiter="\t", encoding="utf-8"
65
+ )
66
+ # get dict
67
+ self.path2 = phoneme_path # "%s/2-name2text.txt"%exp_dir#phoneme_path
68
+ self.path3 = "%s/3-bert" % (
69
+ os.path.dirname(phoneme_path)
70
+ ) # "%s/3-bert"%exp_dir#bert_dir
71
+ self.path6 = semantic_path # "%s/6-name2semantic.tsv"%exp_dir#semantic_path
72
+ assert os.path.exists(self.path2)
73
+ assert os.path.exists(self.path6)
74
+ self.phoneme_data = {}
75
+ with open(self.path2, "r", encoding="utf8") as f:
76
+ lines = f.read().strip("\n").split("\n")
77
+
78
+ for line in lines:
79
+ tmp = line.split("\t")
80
+ if len(tmp) != 4:
81
+ continue
82
+ self.phoneme_data[tmp[0]] = [tmp[1], tmp[2], tmp[3]]
83
+
84
+ # self.phoneme_data = np.load(phoneme_path, allow_pickle=True).item()
85
+ # pad for semantic tokens
86
+ self.PAD: int = pad_val
87
+ # self.hz = 25
88
+ # with open("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert/configs/s2.json", "r") as f:data = f.read()
89
+ # data=json.loads(data)["model"]["semantic_frame_rate"]#50hz
90
+ # self.hz=int(data[:-2])#
91
+ self.hz = int(os.environ.get("hz", "25hz")[:-2])
92
+
93
+ # max seconds of semantic token
94
+ self.max_sec = max_sec
95
+ self.min_ps_ratio = min_ps_ratio
96
+ self.max_ps_ratio = max_ps_ratio
97
+
98
+ if max_sample is not None:
99
+ self.semantic_data = self.semantic_data[:max_sample]
100
+
101
+ # {idx: (semantic, phoneme)}
102
+ # semantic list, phoneme list
103
+ self.semantic_phoneme = []
104
+ self.item_names = []
105
+
106
+ self.inited = False
107
+
108
+ if not self.inited:
109
+ # run the initialization routine
110
+ self.init_batch()
111
+ self.inited = True
112
+ del self.semantic_data
113
+ del self.phoneme_data
114
+ # self.tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
115
+ # self.tokenizer = AutoTokenizer.from_pretrained("/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large")
116
+
117
+ def init_batch(self):
118
+ semantic_data_len = len(self.semantic_data)
119
+ phoneme_data_len = len(self.phoneme_data.keys())
120
+ print("semantic_data_len:", semantic_data_len)
121
+ print("phoneme_data_len:", phoneme_data_len)
122
+ print(self.semantic_data)
123
+ idx = 0
124
+ num_not_in = 0
125
+ num_deleted_bigger = 0
126
+ num_deleted_ps = 0
127
+ for i in range(semantic_data_len):
128
+ # iterate over the samples one by one
129
+ # get str
130
+ item_name = self.semantic_data.iloc[i,0]
131
+ # print(self.phoneme_data)
132
+ try:
133
+ phoneme, word2ph, text = self.phoneme_data[item_name]
134
+ except Exception:
135
+ traceback.print_exc()
136
+ # print(f"{item_name} not in self.phoneme_data !")
137
+ num_not_in += 1
138
+ continue
139
+
140
+ semantic_str = self.semantic_data.iloc[i,1]
141
+ # get token list
142
+ semantic_ids = [int(idx) for idx in semantic_str.split(" ")]
143
+ # (T); no need to reshape to (1, T), since we only need its length
144
+ # drop samples that are too long
145
+ if (
146
+ len(semantic_ids) > self.max_sec * self.hz
147
+ ):  # 1: estimate total duration from the token count and filter out clips longer than max_sec (60 s in the config); 40*25=1k
148
+ num_deleted_bigger += 1
149
+ continue
150
+ # (T,); this is cheap, so it can be done up front instead of per item in __getitem__
151
+ phoneme = phoneme.split(" ")
152
+
153
+ try:
154
+ phoneme_ids = cleaned_text_to_sequence(phoneme, version)
155
+ except Exception:
156
+ traceback.print_exc()
157
+ # print(f"{item_name} not in self.phoneme_data !")
158
+ num_not_in += 1
159
+ continue
160
+ # if len(phoneme_ids) > 400:  # 2: changed to a fixed limit of semantic length / 2.5 instead
161
+ if (
162
+ len(phoneme_ids) > self.max_sec * self.hz / 2.5
163
+ ):  # 2: a fixed limit of semantic length / 2.5 is enough
164
+ num_deleted_ps += 1
165
+ continue
166
+ # if len(semantic_ids) > 1000:###########3
167
+ # num_deleted_bigger += 1
168
+ # continue
169
+
170
+ ps_ratio = len(phoneme_ids) / (len(semantic_ids) / self.hz)
171
+
172
+ if (
173
+ ps_ratio > self.max_ps_ratio or ps_ratio < self.min_ps_ratio
174
+ ):  # 4: phonemes per second must be within 3~25
175
+ num_deleted_ps += 1
176
+ # print(item_name)
177
+ continue
178
+
179
+ self.semantic_phoneme.append((semantic_ids, phoneme_ids))
180
+ idx += 1
181
+ self.item_names.append(item_name)
182
+
183
+ min_num = 100  # with 20 the data would not be duplicated at all; with 30, even duplicated, no checkpoint is saved
184
+ leng = len(self.semantic_phoneme)
185
+ if leng < min_num:
186
+ tmp1 = self.semantic_phoneme
187
+ tmp2 = self.item_names
188
+ self.semantic_phoneme = []
189
+ self.item_names = []
190
+ for _ in range(max(2, int(min_num / leng))):
191
+ self.semantic_phoneme += tmp1
192
+ self.item_names += tmp2
193
+ if num_not_in > 0:
194
+ print(f"there are {num_not_in} semantic datas not in phoneme datas")
195
+ if num_deleted_bigger > 0:
196
+ print(
197
+ f"deleted {num_deleted_bigger} audios who's duration are bigger than {self.max_sec} seconds"
198
+ )
199
+ if num_deleted_ps > 0:
200
+ # 4702 for LibriTTS; LibriTTS is annotated data, so does it still need filtering? => yes, there are extreme values as high as 100
201
+ print(
202
+ f"deleted {num_deleted_ps} audios who's phoneme/sec are bigger than {self.max_ps_ratio} or smaller than {self.min_ps_ratio}"
203
+ )
204
+ """
205
+ there are 31 semantic entries not found in the phoneme data
206
+ deleted 34 audios whose duration is longer than 54 seconds
207
+ deleted 3190 audios whose phoneme/sec ratio is larger than 25 or smaller than 3
208
+ dataset.__len__(): 366463
209
+
210
+ """
211
+ # 345410 for LibriTTS
212
+ print("dataset.__len__():", self.__len__())
213
+
214
+ def __get_item_names__(self) -> List[str]:
215
+ return self.item_names
216
+
217
+ def __len__(self) -> int:
218
+ return len(self.semantic_phoneme)
219
+
220
+ def __getitem__(self, idx: int) -> Dict:
221
+ semantic_ids, phoneme_ids = self.semantic_phoneme[idx]
222
+ item_name = self.item_names[idx]
223
+ phoneme_ids_len = len(phoneme_ids)
224
+ # semantic tokens target
225
+ semantic_ids_len = len(semantic_ids)
226
+
227
+ flag = 0
228
+ path_bert = "%s/%s.pt" % (self.path3, item_name)
229
+ if os.path.exists(path_bert):
230
+ bert_feature = torch.load(path_bert, map_location="cpu")
231
+ else:
232
+ flag = 1
233
+ if flag == 1:
234
+ # bert_feature=torch.zeros_like(phoneme_ids,dtype=torch.float32)
235
+ bert_feature = None
236
+ else:
237
+ assert bert_feature.shape[-1] == len(phoneme_ids)
238
+ return {
239
+ "idx": idx,
240
+ "phoneme_ids": phoneme_ids,
241
+ "phoneme_ids_len": phoneme_ids_len,
242
+ "semantic_ids": semantic_ids,
243
+ "semantic_ids_len": semantic_ids_len,
244
+ "bert_feature": bert_feature,
245
+ }
246
+
247
+ def get_sample_length(self, idx: int):
248
+ semantic_ids = self.semantic_phoneme[idx][0]
249
+ sec = 1.0 * len(semantic_ids) / self.hz
250
+ return sec
251
+
252
+ def collate(self, examples: List[Dict]) -> Dict:
253
+ sample_index: List[int] = []
254
+ phoneme_ids: List[torch.Tensor] = []
255
+ phoneme_ids_lens: List[int] = []
256
+ semantic_ids: List[torch.Tensor] = []
257
+ semantic_ids_lens: List[int] = []
258
+ # return
259
+
260
+ for item in examples:
261
+ sample_index.append(item["idx"])
262
+ phoneme_ids.append(np.array(item["phoneme_ids"], dtype=np.int64))
263
+ semantic_ids.append(np.array(item["semantic_ids"], dtype=np.int64))
264
+ phoneme_ids_lens.append(item["phoneme_ids_len"])
265
+ semantic_ids_lens.append(item["semantic_ids_len"])
266
+
267
+ # pad 0
268
+ phoneme_ids = batch_sequences(phoneme_ids)
269
+ semantic_ids = batch_sequences(semantic_ids, pad_value=self.PAD)
270
+
271
+ # # convert each batch to torch.tensor
272
+ phoneme_ids = torch.tensor(phoneme_ids)
273
+ semantic_ids = torch.tensor(semantic_ids)
274
+ phoneme_ids_lens = torch.tensor(phoneme_ids_lens)
275
+ semantic_ids_lens = torch.tensor(semantic_ids_lens)
276
+ bert_padded = torch.FloatTensor(len(examples), 1024, max(phoneme_ids_lens))
277
+ bert_padded.zero_()
278
+
279
+ for idx, item in enumerate(examples):
280
+ bert = item["bert_feature"]
281
+ if bert is not None:
282
+ bert_padded[idx, :, : bert.shape[-1]] = bert
283
+
284
+ return {
285
+ # List[int]
286
+ "ids": sample_index,
287
+ # torch.Tensor (B, max_phoneme_length)
288
+ "phoneme_ids": phoneme_ids,
289
+ # torch.Tensor (B)
290
+ "phoneme_ids_len": phoneme_ids_lens,
291
+ # torch.Tensor (B, max_semantic_ids_length)
292
+ "semantic_ids": semantic_ids,
293
+ # torch.Tensor (B)
294
+ "semantic_ids_len": semantic_ids_lens,
295
+ # torch.Tensor (B, 1024, max_phoneme_length)
296
+ "bert_feature": bert_padded,
297
+ }
298
+
299
+
300
+ if __name__ == "__main__":
301
+ root_dir = "/data/docker/liujing04/gpt-vits/prepare/dump_mix/"
302
+ dataset = Text2SemanticDataset(
303
+ phoneme_path=root_dir + "phoneme_train.npy",
304
+ semantic_path=root_dir + "semantic_train.tsv",
305
+ )
306
+
307
+ batch_size = 12
308
+ dataloader = DataLoader(
309
+ dataset, batch_size=batch_size, collate_fn=dataset.collate, shuffle=False
310
+ )
311
+ for i, batch in enumerate(dataloader):
312
+ if i % 1000 == 0:
313
+ print(i)
314
+ # if i == 0:
315
+ # print('batch["ids"]:', batch["ids"])
316
+ # print('batch["phoneme_ids"]:', batch["phoneme_ids"],
317
+ # batch["phoneme_ids"].shape)
318
+ # print('batch["phoneme_ids_len"]:', batch["phoneme_ids_len"],
319
+ # batch["phoneme_ids_len"].shape)
320
+ # print('batch["semantic_ids"]:', batch["semantic_ids"],
321
+ # batch["semantic_ids"].shape)
322
+ # print('batch["semantic_ids_len"]:', batch["semantic_ids_len"],
323
+ # batch["semantic_ids_len"].shape)
GPT_SoVITS/AR/models/__init__.py ADDED
File without changes
GPT_SoVITS/AR/models/t2s_lightning_module.py ADDED
@@ -0,0 +1,141 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import os, sys
4
+
5
+ now_dir = os.getcwd()
6
+ sys.path.append(now_dir)
7
+ from typing import Dict
8
+
9
+ import torch
10
+ from pytorch_lightning import LightningModule
11
+ from AR.models.t2s_model import Text2SemanticDecoder
12
+ from AR.modules.lr_schedulers import WarmupCosineLRSchedule
13
+ from AR.modules.optim import ScaledAdam
14
+
15
+ class Text2SemanticLightningModule(LightningModule):
16
+ def __init__(self, config, output_dir, is_train=True):
17
+ super().__init__()
18
+ self.config = config
19
+ self.top_k = 3
20
+ self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
21
+ pretrained_s1 = config.get("pretrained_s1")
22
+ if pretrained_s1 and is_train:
23
+ # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
24
+ print(
25
+ self.load_state_dict(
26
+ torch.load(pretrained_s1, map_location="cpu")["weight"]
27
+ )
28
+ )
29
+ if is_train:
30
+ self.automatic_optimization = False
31
+ self.save_hyperparameters()
32
+ self.eval_dir = output_dir / "eval"
33
+ self.eval_dir.mkdir(parents=True, exist_ok=True)
34
+
35
+ def training_step(self, batch: Dict, batch_idx: int):
36
+ opt = self.optimizers()
37
+ scheduler = self.lr_schedulers()
38
+ forward = self.model.forward if self.config["train"].get("if_dpo", False) else self.model.forward_old
39
+ loss, acc = forward(
40
+ batch["phoneme_ids"],
41
+ batch["phoneme_ids_len"],
42
+ batch["semantic_ids"],
43
+ batch["semantic_ids_len"],
44
+ batch["bert_feature"],
45
+ )
46
+ self.manual_backward(loss)
47
+ if batch_idx > 0 and batch_idx % 4 == 0:
48
+ opt.step()
49
+ opt.zero_grad()
50
+ scheduler.step()
51
+
52
+ self.log(
53
+ "total_loss",
54
+ loss,
55
+ on_step=True,
56
+ on_epoch=True,
57
+ prog_bar=True,
58
+ sync_dist=True,
59
+ )
60
+ self.log(
61
+ "lr",
62
+ scheduler.get_last_lr()[0],
63
+ on_epoch=True,
64
+ prog_bar=True,
65
+ sync_dist=True,
66
+ )
67
+ self.log(
68
+ f"top_{self.top_k}_acc",
69
+ acc,
70
+ on_step=True,
71
+ on_epoch=True,
72
+ prog_bar=True,
73
+ sync_dist=True,
74
+ )
75
+
76
+ def validation_step(self, batch: Dict, batch_idx: int):
77
+ return
78
+
79
+ # # get loss
80
+ # loss, acc = self.model.forward(
81
+ # batch['phoneme_ids'], batch['phoneme_ids_len'],
82
+ # batch['semantic_ids'], batch['semantic_ids_len'],
83
+ # batch['bert_feature']
84
+ # )
85
+ #
86
+ # self.log(
87
+ # "val_total_loss",
88
+ # loss,
89
+ # on_step=True,
90
+ # on_epoch=True,
91
+ # prog_bar=True,
92
+ # sync_dist=True)
93
+ # self.log(
94
+ # f"val_top_{self.top_k}_acc",
95
+ # acc,
96
+ # on_step=True,
97
+ # on_epoch=True,
98
+ # prog_bar=True,
99
+ # sync_dist=True)
100
+ #
101
+ # # get infer output
102
+ # semantic_len = batch['semantic_ids'].size(1)
103
+ # prompt_len = min(int(semantic_len * 0.5), 150)
104
+ # prompt = batch['semantic_ids'][:, :prompt_len]
105
+ # pred_semantic = self.model.infer(batch['phoneme_ids'],
106
+ # batch['phoneme_ids_len'], prompt,
107
+ # batch['bert_feature']
108
+ # )
109
+ # save_name = f'semantic_toks_{batch_idx}.pt'
110
+ # save_path = os.path.join(self.eval_dir, save_name)
111
+ # torch.save(pred_semantic.detach().cpu(), save_path)
112
+
113
+ def configure_optimizers(self):
114
+ model_parameters = self.model.parameters()
115
+ parameters_names = []
116
+ parameters_names.append(
117
+ [name_param_pair[0] for name_param_pair in self.model.named_parameters()]
118
+ )
119
+ lm_opt = ScaledAdam(
120
+ model_parameters,
121
+ lr=0.01,
122
+ betas=(0.9, 0.95),
123
+ clipping_scale=2.0,
124
+ parameters_names=parameters_names,
125
+ show_dominant_parameters=False,
126
+ clipping_update_period=1000,
127
+ )
128
+
129
+ return {
130
+ "optimizer": lm_opt,
131
+ "lr_scheduler": {
132
+ "scheduler": WarmupCosineLRSchedule(
133
+ lm_opt,
134
+ init_lr=self.config["optimizer"]["lr_init"],
135
+ peak_lr=self.config["optimizer"]["lr"],
136
+ end_lr=self.config["optimizer"]["lr_end"],
137
+ warmup_steps=self.config["optimizer"]["warmup_steps"],
138
+ total_steps=self.config["optimizer"]["decay_steps"],
139
+ )
140
+ },
141
+ }
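One detail worth calling out in training_step above: automatic optimization is disabled, and the optimizer and scheduler only step on every fourth batch, so gradients from the intervening batches accumulate. A standalone sketch of that pattern with a toy model (not the project's code) is below.

# Toy illustration of the manual gradient-accumulation pattern used above.
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.01)
for batch_idx in range(8):
    loss = model(torch.randn(2, 4)).mean()
    loss.backward()                        # corresponds to self.manual_backward(loss)
    if batch_idx > 0 and batch_idx % 4 == 0:
        opt.step()                         # weights update once every 4 batches
        opt.zero_grad()                    # gradients are cleared only after the step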
GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py ADDED
@@ -0,0 +1,107 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import os, sys
4
+
5
+ now_dir = os.getcwd()
6
+ sys.path.append(now_dir)
7
+ from typing import Dict
8
+
9
+ import torch
10
+ from pytorch_lightning import LightningModule
11
+ from AR.models.t2s_model_onnx import Text2SemanticDecoder
12
+ from AR.modules.lr_schedulers import WarmupCosineLRSchedule
13
+ from AR.modules.optim import ScaledAdam
14
+
15
+
16
+ class Text2SemanticLightningModule(LightningModule):
17
+ def __init__(self, config, output_dir, is_train=True):
18
+ super().__init__()
19
+ self.config = config
20
+ self.top_k = 3
21
+ self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
22
+ pretrained_s1 = config.get("pretrained_s1")
23
+ if pretrained_s1 and is_train:
24
+ # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
25
+ print(
26
+ self.load_state_dict(
27
+ torch.load(pretrained_s1, map_location="cpu")["weight"]
28
+ )
29
+ )
30
+ if is_train:
31
+ self.automatic_optimization = False
32
+ self.save_hyperparameters()
33
+ self.eval_dir = output_dir / "eval"
34
+ self.eval_dir.mkdir(parents=True, exist_ok=True)
35
+
36
+ def training_step(self, batch: Dict, batch_idx: int):
37
+ opt = self.optimizers()
38
+ scheduler = self.lr_schedulers()
39
+ loss, acc = self.model.forward(
40
+ batch["phoneme_ids"],
41
+ batch["phoneme_ids_len"],
42
+ batch["semantic_ids"],
43
+ batch["semantic_ids_len"],
44
+ batch["bert_feature"],
45
+ )
46
+ self.manual_backward(loss)
47
+ if batch_idx > 0 and batch_idx % 4 == 0:
48
+ opt.step()
49
+ opt.zero_grad()
50
+ scheduler.step()
51
+
52
+ self.log(
53
+ "total_loss",
54
+ loss,
55
+ on_step=True,
56
+ on_epoch=True,
57
+ prog_bar=True,
58
+ sync_dist=True,
59
+ )
60
+ self.log(
61
+ "lr",
62
+ scheduler.get_last_lr()[0],
63
+ on_epoch=True,
64
+ prog_bar=True,
65
+ sync_dist=True,
66
+ )
67
+ self.log(
68
+ f"top_{self.top_k}_acc",
69
+ acc,
70
+ on_step=True,
71
+ on_epoch=True,
72
+ prog_bar=True,
73
+ sync_dist=True,
74
+ )
75
+
76
+ def validation_step(self, batch: Dict, batch_idx: int):
77
+ return
78
+
79
+ def configure_optimizers(self):
80
+ model_parameters = self.model.parameters()
81
+ parameters_names = []
82
+ parameters_names.append(
83
+ [name_param_pair[0] for name_param_pair in self.model.named_parameters()]
84
+ )
85
+ lm_opt = ScaledAdam(
86
+ model_parameters,
87
+ lr=0.01,
88
+ betas=(0.9, 0.95),
89
+ clipping_scale=2.0,
90
+ parameters_names=parameters_names,
91
+ show_dominant_parameters=False,
92
+ clipping_update_period=1000,
93
+ )
94
+
95
+ return {
96
+ "optimizer": lm_opt,
97
+ "lr_scheduler": {
98
+ "scheduler": WarmupCosineLRSchedule(
99
+ lm_opt,
100
+ init_lr=self.config["optimizer"]["lr_init"],
101
+ peak_lr=self.config["optimizer"]["lr"],
102
+ end_lr=self.config["optimizer"]["lr_end"],
103
+ warmup_steps=self.config["optimizer"]["warmup_steps"],
104
+ total_steps=self.config["optimizer"]["decay_steps"],
105
+ )
106
+ },
107
+ }
GPT_SoVITS/AR/models/t2s_model.py ADDED
@@ -0,0 +1,876 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import math
4
+ from typing import List, Optional
5
+ import torch
6
+ from tqdm import tqdm
7
+
8
+ from AR.models.utils import make_pad_mask
9
+ from AR.models.utils import (
10
+ topk_sampling,
11
+ sample,
12
+ logits_to_probs,
13
+ multinomial_sample_one_no_sync,
14
+ dpo_loss,
15
+ make_reject_y,
16
+ get_batch_logps
17
+ )
18
+ from AR.modules.embedding import SinePositionalEmbedding
19
+ from AR.modules.embedding import TokenEmbedding
20
+ from AR.modules.transformer import LayerNorm
21
+ from AR.modules.transformer import TransformerEncoder
22
+ from AR.modules.transformer import TransformerEncoderLayer
23
+ from torch import nn
24
+ from torch.nn import functional as F
25
+ from torchmetrics.classification import MulticlassAccuracy
26
+
27
+ default_config = {
28
+ "embedding_dim": 512,
29
+ "hidden_dim": 512,
30
+ "num_head": 8,
31
+ "num_layers": 12,
32
+ "num_codebook": 8,
33
+ "p_dropout": 0.0,
34
+ "vocab_size": 1024 + 1,
35
+ "phoneme_vocab_size": 512,
36
+ "EOS": 1024,
37
+ }
38
+
39
+ # @torch.jit.script  ## if enabled, the first inference is very slow and inference speed becomes unstable
40
+ # Efficient implementation equivalent to the following:
41
+ def scaled_dot_product_attention(query:torch.Tensor, key:torch.Tensor, value:torch.Tensor, attn_mask:Optional[torch.Tensor]=None, scale:Optional[torch.Tensor]=None) -> torch.Tensor:
42
+ B, H, L, S =query.size(0), query.size(1), query.size(-2), key.size(-2)
43
+ if scale is None:
44
+ scale_factor = torch.tensor(1 / math.sqrt(query.size(-1)))
45
+ else:
46
+ scale_factor = scale
47
+ attn_bias = torch.zeros(B, H, L, S, dtype=query.dtype, device=query.device)
48
+
49
+ if attn_mask is not None:
50
+ if attn_mask.dtype == torch.bool:
51
+ attn_bias.masked_fill_(attn_mask, float("-inf"))
52
+ else:
53
+ attn_bias += attn_mask
54
+ attn_weight = query @ key.transpose(-2, -1) * scale_factor
55
+ attn_weight += attn_bias
56
+ attn_weight = torch.softmax(attn_weight, dim=-1)
57
+
58
+ if attn_mask is not None:
59
+ if attn_mask.dtype == torch.bool:
60
+ attn_weight.masked_fill_(attn_mask, 0)
61
+ else:
62
+ attn_mask[attn_mask!=float("-inf")] =0
63
+ attn_mask[attn_mask==float("-inf")] =1
64
+ attn_weight.masked_fill_(attn_mask, 0)
65
+
66
+ return attn_weight @ value
67
+
68
+ @torch.jit.script
69
+ class T2SMLP:
70
+ def __init__(self, w1, b1, w2, b2):
71
+ self.w1 = w1
72
+ self.b1 = b1
73
+ self.w2 = w2
74
+ self.b2 = b2
75
+
76
+ def forward(self, x):
77
+ x = F.relu(F.linear(x, self.w1, self.b1))
78
+ x = F.linear(x, self.w2, self.b2)
79
+ return x
80
+
81
+
82
+ @torch.jit.script
83
+ class T2SBlock:
84
+ def __init__(
85
+ self,
86
+ num_heads,
87
+ hidden_dim: int,
88
+ mlp: T2SMLP,
89
+ qkv_w,
90
+ qkv_b,
91
+ out_w,
92
+ out_b,
93
+ norm_w1,
94
+ norm_b1,
95
+ norm_eps1,
96
+ norm_w2,
97
+ norm_b2,
98
+ norm_eps2,
99
+ ):
100
+ self.num_heads = num_heads
101
+ self.mlp = mlp
102
+ self.hidden_dim: int = hidden_dim
103
+ self.qkv_w = qkv_w
104
+ self.qkv_b = qkv_b
105
+ self.out_w = out_w
106
+ self.out_b = out_b
107
+ self.norm_w1 = norm_w1
108
+ self.norm_b1 = norm_b1
109
+ self.norm_eps1 = norm_eps1
110
+ self.norm_w2 = norm_w2
111
+ self.norm_b2 = norm_b2
112
+ self.norm_eps2 = norm_eps2
113
+
114
+ self.false = torch.tensor(False, dtype=torch.bool)
115
+
116
+ @torch.jit.ignore
117
+ def to_mask(self, x:torch.Tensor, padding_mask:Optional[torch.Tensor]):
118
+ if padding_mask is None:
119
+ return x
120
+
121
+ if padding_mask.dtype == torch.bool:
122
+ return x.masked_fill(padding_mask, 0)
123
+ else:
124
+ return x * padding_mask
125
+
126
+ def process_prompt(self, x:torch.Tensor, attn_mask : torch.Tensor, padding_mask:Optional[torch.Tensor]=None, torch_sdpa:bool=True):
127
+
128
+
129
+ q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1)
130
+
131
+ batch_size = q.shape[0]
132
+ q_len = q.shape[1]
133
+ kv_len = k.shape[1]
134
+
135
+ q = self.to_mask(q, padding_mask)
136
+ k_cache = self.to_mask(k, padding_mask)
137
+ v_cache = self.to_mask(v, padding_mask)
138
+
139
+ q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
140
+ k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
141
+ v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
142
+
143
+ if torch_sdpa:
144
+ attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask)
145
+ else:
146
+ attn = scaled_dot_product_attention(q, k, v, attn_mask)
147
+
148
+ attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1)
149
+ attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b)
150
+
151
+ x = x + attn
152
+ x = F.layer_norm(
153
+ x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
154
+ )
155
+ x = x + self.mlp.forward(x)
156
+ x = F.layer_norm(
157
+ x,
158
+ [self.hidden_dim],
159
+ self.norm_w2,
160
+ self.norm_b2,
161
+ self.norm_eps2,
162
+ )
163
+ return x, k_cache, v_cache
164
+
165
+ def decode_next_token(self, x:torch.Tensor, k_cache:torch.Tensor, v_cache:torch.Tensor, attn_mask:Optional[torch.Tensor]=None, torch_sdpa:bool=True):
166
+ q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1)
167
+
168
+ k_cache = torch.cat([k_cache, k], dim=1)
169
+ v_cache = torch.cat([v_cache, v], dim=1)
170
+
171
+ batch_size = q.shape[0]
172
+ q_len = q.shape[1]
173
+ kv_len = k_cache.shape[1]
174
+
175
+ q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
176
+ k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
177
+ v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
178
+
179
+
180
+ if torch_sdpa:
181
+ attn = F.scaled_dot_product_attention(q, k, v)
182
+ else:
183
+ attn = scaled_dot_product_attention(q, k, v, attn_mask)
184
+
185
+ attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1)
186
+ attn = F.linear(attn, self.out_w, self.out_b)
187
+
188
+ x = x + attn
189
+ x = F.layer_norm(
190
+ x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
191
+ )
192
+ x = x + self.mlp.forward(x)
193
+ x = F.layer_norm(
194
+ x,
195
+ [self.hidden_dim],
196
+ self.norm_w2,
197
+ self.norm_b2,
198
+ self.norm_eps2,
199
+ )
200
+ return x, k_cache, v_cache
201
+
202
+
203
+ @torch.jit.script
204
+ class T2STransformer:
205
+ def __init__(self, num_blocks : int, blocks: List[T2SBlock]):
206
+ self.num_blocks : int = num_blocks
207
+ self.blocks = blocks
208
+
209
+ def process_prompt(
210
+ self, x:torch.Tensor, attn_mask : torch.Tensor,
211
+ padding_mask : Optional[torch.Tensor]=None,
212
+ torch_sdpa:bool=True
213
+ ):
214
+ k_cache : List[torch.Tensor] = []
215
+ v_cache : List[torch.Tensor] = []
216
+ for i in range(self.num_blocks):
217
+ x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask, torch_sdpa)
218
+ k_cache.append(k_cache_)
219
+ v_cache.append(v_cache_)
220
+ return x, k_cache, v_cache
221
+
222
+ def decode_next_token(
223
+ self, x:torch.Tensor,
224
+ k_cache: List[torch.Tensor],
225
+ v_cache: List[torch.Tensor],
226
+ attn_mask : Optional[torch.Tensor]=None,
227
+ torch_sdpa:bool=True
228
+ ):
229
+ for i in range(self.num_blocks):
230
+ x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(x, k_cache[i], v_cache[i], attn_mask, torch_sdpa)
231
+ return x, k_cache, v_cache
232
+
233
+
234
+ class Text2SemanticDecoder(nn.Module):
235
+ def __init__(self, config, norm_first=False, top_k=3):
236
+ super(Text2SemanticDecoder, self).__init__()
237
+ self.model_dim = config["model"]["hidden_dim"]
238
+ self.embedding_dim = config["model"]["embedding_dim"]
239
+ self.num_head = config["model"]["head"]
240
+ self.num_layers = config["model"]["n_layer"]
241
+ self.norm_first = norm_first
242
+ self.vocab_size = config["model"]["vocab_size"]
243
+ self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
244
+ self.p_dropout = config["model"]["dropout"]
245
+ self.EOS = config["model"]["EOS"]
246
+ self.norm_first = norm_first
247
+ assert self.EOS == self.vocab_size - 1
248
+ # should be same as num of kmeans bin
249
+ # assert self.EOS == 1024
250
+ self.bert_proj = nn.Linear(1024, self.embedding_dim)
251
+ self.ar_text_embedding = TokenEmbedding(
252
+ self.embedding_dim, self.phoneme_vocab_size, self.p_dropout
253
+ )
254
+ self.ar_text_position = SinePositionalEmbedding(
255
+ self.embedding_dim, dropout=0.1, scale=False, alpha=True
256
+ )
257
+ self.ar_audio_embedding = TokenEmbedding(
258
+ self.embedding_dim, self.vocab_size, self.p_dropout
259
+ )
260
+ self.ar_audio_position = SinePositionalEmbedding(
261
+ self.embedding_dim, dropout=0.1, scale=False, alpha=True
262
+ )
263
+
264
+ self.h = TransformerEncoder(
265
+ TransformerEncoderLayer(
266
+ d_model=self.model_dim,
267
+ nhead=self.num_head,
268
+ dim_feedforward=self.model_dim * 4,
269
+ dropout=0.1,
270
+ batch_first=True,
271
+ norm_first=norm_first,
272
+ ),
273
+ num_layers=self.num_layers,
274
+ norm=LayerNorm(self.model_dim) if norm_first else None,
275
+ )
276
+
277
+ self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
278
+ self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
279
+
280
+ self.ar_accuracy_metric = MulticlassAccuracy(
281
+ self.vocab_size,
282
+ top_k=top_k,
283
+ average="micro",
284
+ multidim_average="global",
285
+ ignore_index=self.EOS,
286
+ )
287
+
288
+ blocks = []
289
+
290
+ for i in range(self.num_layers):
291
+ layer = self.h.layers[i]
292
+ t2smlp = T2SMLP(
293
+ layer.linear1.weight,
294
+ layer.linear1.bias,
295
+ layer.linear2.weight,
296
+ layer.linear2.bias
297
+ )
298
+
299
+ block = T2SBlock(
300
+ self.num_head,
301
+ self.model_dim,
302
+ t2smlp,
303
+ layer.self_attn.in_proj_weight,
304
+ layer.self_attn.in_proj_bias,
305
+ layer.self_attn.out_proj.weight,
306
+ layer.self_attn.out_proj.bias,
307
+ layer.norm1.weight,
308
+ layer.norm1.bias,
309
+ layer.norm1.eps,
310
+ layer.norm2.weight,
311
+ layer.norm2.bias,
312
+ layer.norm2.eps
313
+ )
314
+
315
+ blocks.append(block)
316
+
317
+ self.t2s_transformer = T2STransformer(self.num_layers, blocks)
318
+
319
+ def make_input_data(self, x, x_lens, y, y_lens, bert_feature):
320
+ x = self.ar_text_embedding(x)
321
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
322
+ x = self.ar_text_position(x)
323
+ x_mask = make_pad_mask(x_lens)
324
+
325
+ y_mask = make_pad_mask(y_lens)
326
+ y_mask_int = y_mask.type(torch.int64)
327
+ codes = y.type(torch.int64) * (1 - y_mask_int)
328
+
329
+ # Training
330
+ # AR Decoder
331
+ y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
332
+ x_len = x_lens.max()
333
+ y_len = y_lens.max()
334
+ y_emb = self.ar_audio_embedding(y)
335
+ y_pos = self.ar_audio_position(y_emb)
336
+
337
+ xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
338
+
339
+ ar_xy_padding_mask = xy_padding_mask
340
+
341
+ x_attn_mask = F.pad(
342
+ torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
343
+ (0, y_len),
344
+ value=True,
345
+ )
346
+ # x_attn_mask[:, x_len]=False
347
+ y_attn_mask = F.pad(
348
+ torch.triu(
349
+ torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
350
+ diagonal=1,
351
+ ),
352
+ (x_len, 0),
353
+ value=False,
354
+ )
355
+
356
+ xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
357
+ bsz, src_len = x.shape[0], x_len + y_len
358
+ _xy_padding_mask = (
359
+ ar_xy_padding_mask.view(bsz, 1, 1, src_len)
360
+ .expand(-1, self.num_head, -1, -1)
361
+ .reshape(bsz * self.num_head, 1, src_len)
362
+ )
363
+ xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
364
+ new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
365
+ new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
366
+ xy_attn_mask = new_attn_mask
367
+ # feed x and the complete y into the model in a single pass
368
+ xy_pos = torch.concat([x, y_pos], dim=1)
369
+
370
+ return xy_pos, xy_attn_mask, targets
371
+
372
+ def forward(self, x, x_lens, y, y_lens, bert_feature):
373
+ """
374
+ x: phoneme_ids
375
+ y: semantic_ids
376
+ """
377
+
378
+ reject_y, reject_y_lens = make_reject_y(y, y_lens)
379
+
380
+ xy_pos, xy_attn_mask, targets = self.make_input_data(x, x_lens, y, y_lens, bert_feature)
381
+
382
+ xy_dec, _ = self.h(
383
+ (xy_pos, None),
384
+ mask=xy_attn_mask,
385
+ )
386
+ x_len = x_lens.max()
387
+ logits = self.ar_predict_layer(xy_dec[:, x_len:])
388
+
389
+ ###### DPO #############
390
+ reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data(x, x_lens, reject_y, reject_y_lens, bert_feature)
391
+
392
+ reject_xy_dec, _ = self.h(
393
+ (reject_xy_pos, None),
394
+ mask=reject_xy_attn_mask,
395
+ )
396
+ x_len = x_lens.max()
397
+ reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len:])
398
+
399
+ # loss
400
+ # from feiteng: the longer the duration, the larger the gradient update should be, hence sum reduction
401
+
402
+ loss_1 = F.cross_entropy(logits.permute(0, 2, 1), targets, reduction="sum")
403
+ acc = self.ar_accuracy_metric(logits.permute(0, 2, 1).detach(), targets).item()
404
+
405
+ A_logits, R_logits = get_batch_logps(logits, reject_logits, targets, reject_targets)
406
+ loss_2, _, _ = dpo_loss(A_logits, R_logits, 0, 0, 0.2, reference_free=True)
407
+
408
+ loss = loss_1 + loss_2
409
+
410
+ return loss, acc
411
+
412
+ def forward_old(self, x, x_lens, y, y_lens, bert_feature):
413
+ """
414
+ x: phoneme_ids
415
+ y: semantic_ids
416
+ """
417
+ x = self.ar_text_embedding(x)
418
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
419
+ x = self.ar_text_position(x)
420
+ x_mask = make_pad_mask(x_lens)
421
+
422
+ y_mask = make_pad_mask(y_lens)
423
+ y_mask_int = y_mask.type(torch.int64)
424
+ codes = y.type(torch.int64) * (1 - y_mask_int)
425
+
426
+ # Training
427
+ # AR Decoder
428
+ y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
429
+ x_len = x_lens.max()
430
+ y_len = y_lens.max()
431
+ y_emb = self.ar_audio_embedding(y)
432
+ y_pos = self.ar_audio_position(y_emb)
433
+
434
+ xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
435
+ ar_xy_padding_mask = xy_padding_mask
436
+
437
+ x_attn_mask = F.pad(
438
+ torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
439
+ (0, y_len),
440
+ value=True,
441
+ )
442
+ y_attn_mask = F.pad(
443
+ torch.triu(
444
+ torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
445
+ diagonal=1,
446
+ ),
447
+ (x_len, 0),
448
+ value=False,
449
+ )
450
+ xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
451
+ bsz, src_len = x.shape[0], x_len + y_len
452
+ _xy_padding_mask = (
453
+ ar_xy_padding_mask.view(bsz, 1, 1, src_len)
454
+ .expand(-1, self.num_head, -1, -1)
455
+ .reshape(bsz * self.num_head, 1, src_len)
456
+ )
457
+ xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
458
+ new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
459
+ new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
460
+ xy_attn_mask = new_attn_mask
461
+ # feed x and the complete y into the model in a single pass
462
+ xy_pos = torch.concat([x, y_pos], dim=1)
463
+ xy_dec, _ = self.h(
464
+ (xy_pos, None),
465
+ mask=xy_attn_mask,
466
+ )
467
+ logits = self.ar_predict_layer(xy_dec[:, x_len:]).permute(0, 2, 1)
468
+ # loss
469
+ # from feiteng: the longer the duration, the larger the gradient update should be, hence sum reduction
470
+ loss = F.cross_entropy(logits, targets, reduction="sum")
471
+ acc = self.ar_accuracy_metric(logits.detach(), targets).item()
472
+ return loss, acc
473
+
474
+ # TODO: check how this function differs from forward, and what to pass as prompts when there is no semantic prompt
475
+ def infer(
476
+ self,
477
+ x,
478
+ x_lens,
479
+ prompts,
480
+ bert_feature,
481
+ top_k: int = -100,
482
+ early_stop_num: int = -1,
483
+ temperature: float = 1.0,
484
+ ):
485
+ x = self.ar_text_embedding(x)
486
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
487
+ x = self.ar_text_position(x)
488
+
489
+ # AR Decoder
490
+ y = prompts
491
+ prefix_len = y.shape[1]
492
+ x_len = x.shape[1]
493
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
494
+ stop = False
495
+ for _ in tqdm(range(1500)):
496
+ y_emb = self.ar_audio_embedding(y)
497
+ y_pos = self.ar_audio_position(y_emb)
498
+ # feed x together with the gradually growing y into the model
499
+ xy_pos = torch.concat([x, y_pos], dim=1)
500
+ y_len = y.shape[1]
501
+ x_attn_mask_pad = F.pad(
502
+ x_attn_mask,
503
+ (0, y_len),
504
+ value=True,
505
+ )
506
+ y_attn_mask = F.pad(
507
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
508
+ (x_len, 0),
509
+ value=False,
510
+ )
511
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
512
+ y.device
513
+ )
514
+
515
+ xy_dec, _ = self.h(
516
+ (xy_pos, None),
517
+ mask=xy_attn_mask,
518
+ )
519
+ logits = self.ar_predict_layer(xy_dec[:, -1])
520
+ samples = topk_sampling(
521
+ logits, top_k=top_k, top_p=1.0, temperature=temperature
522
+ )
523
+
524
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
525
+ print("use early stop num:", early_stop_num)
526
+ stop = True
527
+
528
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
529
+ # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
530
+ stop = True
531
+ if stop:
532
+ if prompts.shape[1] == y.shape[1]:
533
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
534
+ print("bad zero prediction")
535
+ print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
536
+ break
537
+ # the semantic_ids generated this step are appended to the previous y to form the new y
538
+ # print(samples.shape)  # [1, 1]; the first 1 is the batch size
539
+ # import os
540
+ # os._exit(2333)
541
+ y = torch.concat([y, samples], dim=1)
542
+ return y
543
+
544
+ def pad_y_eos(self, y, y_mask_int, eos_id):
545
+ targets = F.pad(y, (0, 1), value=0) + eos_id * F.pad(
546
+ y_mask_int, (0, 1), value=1
547
+ )
548
+ # shift by one position (inputs vs. targets)
549
+ return targets[:, :-1], targets[:, 1:]
550
+
551
+ def infer_panel_batch_infer(
552
+ self,
553
+ x:List[torch.LongTensor],  # all text tokens
554
+ x_lens:torch.LongTensor,
555
+ prompts:torch.LongTensor,  # reference audio tokens
556
+ bert_feature:List[torch.LongTensor],
557
+ top_k: int = -100,
558
+ top_p: int = 100,
559
+ early_stop_num: int = -1,
560
+ temperature: float = 1.0,
561
+ repetition_penalty: float = 1.35,
562
+ **kwargs,
563
+ ):
564
+ if prompts is None:
565
+ print("Warning: Prompt free is not supported batch_infer! switch to naive_infer")
566
+ return self.infer_panel_naive_batched(x, x_lens, prompts, bert_feature, top_k=top_k, top_p=top_p, early_stop_num=early_stop_num, temperature=temperature, **kwargs)
567
+
568
+
569
+ max_len = kwargs.get("max_len",x_lens.max())
570
+ x_list = []
571
+ for x_item, bert_item in zip(x, bert_feature):
572
+ # max_len = max(max_len, x_item.shape[0], bert_item.shape[1])
573
+ x_item = self.ar_text_embedding(x_item.unsqueeze(0))
574
+ x_item = x_item + self.bert_proj(bert_item.transpose(0, 1).unsqueeze(0))
575
+ x_item = self.ar_text_position(x_item).squeeze(0)
576
+ x_item = F.pad(x_item,(0,0,0,max_len-x_item.shape[0]),value=0) if x_item.shape[0]<max_len else x_item
577
+ x_list.append(x_item)
578
+ x = torch.stack(x_list, dim=0)
579
+
580
+
581
+ # AR Decoder
582
+ y = prompts
583
+
584
+ x_len = x.shape[1]
585
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
586
+ stop = False
587
+
588
+ k_cache = None
589
+ v_cache = None
590
+ ################### first step ##########################
591
+ if y is not None:
592
+ y_emb = self.ar_audio_embedding(y)
593
+ y_len = y_emb.shape[1]
594
+ prefix_len = y.shape[1]
595
+ y_lens = torch.LongTensor([y_emb.shape[1]]*y_emb.shape[0]).to(x.device)
596
+ y_pos = self.ar_audio_position(y_emb)
597
+ xy_pos = torch.concat([x, y_pos], dim=1)
598
+ ref_free = False
599
+ else:
600
+ y_emb = None
601
+ y_len = 0
602
+ prefix_len = 0
603
+ y_lens = torch.LongTensor([y_len]*x.shape[0]).to(x.device)
604
+ y_pos = None
605
+ xy_pos = x
606
+ y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
607
+ ref_free = True
608
+
609
+
610
+ ##### create mask #####
611
+ bsz = x.shape[0]
612
+ src_len = x_len + y_len
613
+ y_padding_mask = make_pad_mask(y_lens, y_len)
614
+ x_padding_mask = make_pad_mask(x_lens, max_len)
615
+
616
+ # (bsz, x_len + y_len)
617
+ xy_padding_mask = torch.concat([x_padding_mask, y_padding_mask], dim=1)
618
+
619
+ x_mask = F.pad(
620
+ x_attn_mask,
621
+ (0, y_len),  # the all-0 xx mask is extended with an all-1 xy block, giving shape (x, x+y)
622
+ value=True,
623
+ )
624
+ y_mask = F.pad(  # the upper-right-1 yy mask is extended with 0s for xy on the left, giving shape (y, x+y)
625
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
626
+ (x_len, 0),
627
+ value=False,
628
+ )
629
+
630
+ xy_mask = torch.concat([x_mask, y_mask], dim=0).view(1 , src_len, src_len).repeat(bsz, 1, 1).to(x.device)
631
+ _xy_padding_mask = xy_padding_mask.view(bsz, 1, src_len).repeat(1, src_len, 1)
632
+
633
+ for i in range(bsz):
634
+ l = x_lens[i]
635
+ _xy_padding_mask[i,l:max_len,:]=True
636
+
637
+ xy_attn_mask = xy_mask.logical_or(_xy_padding_mask)
638
+ xy_attn_mask = xy_attn_mask.unsqueeze(1).expand(-1, self.num_head, -1, -1)
639
+ xy_attn_mask = xy_attn_mask.bool()
640
+ xy_padding_mask = xy_padding_mask.view(bsz, src_len, 1)
641
+
642
+ ###### decode #####
643
+ y_list = [None]*y.shape[0]
644
+ batch_idx_map = list(range(y.shape[0]))
645
+ idx_list = [None]*y.shape[0]
646
+ for idx in tqdm(range(1500)):
647
+ if idx == 0:
648
+ xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, xy_padding_mask, False)
649
+ else:
650
+ xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache, xy_attn_mask, False)
651
+ logits = self.ar_predict_layer(
652
+ xy_dec[:, -1]
653
+ )
654
+
655
+ if idx == 0:
656
+ xy_attn_mask = F.pad(xy_attn_mask[:,:,-1].unsqueeze(-2),(0,1),value=False)
657
+ logits = logits[:, :-1]
658
+ else:
659
+ xy_attn_mask = F.pad(xy_attn_mask,(0,1),value=False)
660
+
661
+ samples = sample(
662
+ logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
663
+ )[0]
664
+
665
+ y = torch.concat([y, samples], dim=1)
666
+
667
+ # remove sequences that have finished generating from the batch, to further cut computation
668
+ tokens = torch.argmax(logits, dim=-1)
669
+ reserved_idx_of_batch_for_y = None
670
+ if (self.EOS in samples[:, 0]) or \
671
+ (self.EOS in tokens):  # stop once EOS has been generated
672
+ l1 = samples[:, 0]==self.EOS
673
+ l2 = tokens==self.EOS
674
+ l = l1.logical_or(l2)
675
+ removed_idx_of_batch_for_y = torch.where(l==True)[0].tolist()
676
+ reserved_idx_of_batch_for_y = torch.where(l==False)[0]
677
+ # batch_indexs = torch.tensor(batch_idx_map, device=y.device)[removed_idx_of_batch_for_y]
678
+ for i in removed_idx_of_batch_for_y:
679
+ batch_index = batch_idx_map[i]
680
+ idx_list[batch_index] = idx - 1
681
+ y_list[batch_index] = y[i, :-1]
682
+
683
+ batch_idx_map = [batch_idx_map[i] for i in reserved_idx_of_batch_for_y.tolist()]
684
+
685
+ # keep only the sequences in the batch that have not finished generating
686
+ if reserved_idx_of_batch_for_y is not None:
687
+ # index = torch.LongTensor(batch_idx_map).to(y.device)
688
+ y = torch.index_select(y, dim=0, index=reserved_idx_of_batch_for_y)
689
+ xy_attn_mask = torch.index_select(xy_attn_mask, dim=0, index=reserved_idx_of_batch_for_y)
690
+ if k_cache is not None :
691
+ for i in range(len(k_cache)):
692
+ k_cache[i] = torch.index_select(k_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
693
+ v_cache[i] = torch.index_select(v_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
694
+
695
+
696
+ if (early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num) or idx==1499:
697
+ print("use early stop num:", early_stop_num)
698
+ stop = True
699
+ for i, batch_index in enumerate(batch_idx_map):
700
+ batch_index = batch_idx_map[i]
701
+ idx_list[batch_index] = idx
702
+ y_list[batch_index] = y[i, :-1]
703
+
704
+ if not (None in idx_list):
705
+ stop = True
706
+
707
+ if stop:
708
+ if y.shape[1]==0:
709
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
710
+ print("bad zero prediction")
711
+ print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
712
+ break
713
+
714
+ ####################### update next step ###################################
715
+ y_emb = self.ar_audio_embedding(y[:, -1:])
716
+ xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to( dtype= y_emb.dtype,device=y_emb.device)
717
+
718
+ if (None in idx_list):
719
+ for i in range(x.shape[0]):
720
+ if idx_list[i] is None:
721
+ idx_list[i] = 1500 - 1  # if EOS was never generated, use the maximum length instead
722
+
723
+ if ref_free:
724
+ return y_list, [0]*x.shape[0]
725
+ # print(idx_list)
726
+ return y_list, idx_list
727
+
728
+ def infer_panel_naive_batched(self,
729
+ x:List[torch.LongTensor],  # all text tokens
730
+ x_lens:torch.LongTensor,
731
+ prompts:torch.LongTensor,  # reference audio tokens
732
+ bert_feature:List[torch.LongTensor],
733
+ top_k: int = -100,
734
+ top_p: int = 100,
735
+ early_stop_num: int = -1,
736
+ temperature: float = 1.0,
737
+ repetition_penalty: float = 1.35,
738
+ **kwargs
739
+ ):
740
+ y_list = []
741
+ idx_list = []
742
+ for i in range(len(x)):
743
+ y, idx = self.infer_panel_naive(x[i].unsqueeze(0),
744
+ x_lens[i],
745
+ prompts[i].unsqueeze(0) if prompts is not None else None,
746
+ bert_feature[i].unsqueeze(0),
747
+ top_k,
748
+ top_p,
749
+ early_stop_num,
750
+ temperature,
751
+ repetition_penalty,
752
+ **kwargs)
753
+ y_list.append(y[0])
754
+ idx_list.append(idx)
755
+
756
+ return y_list, idx_list
757
+
758
+ def infer_panel_naive(
759
+ self,
760
+ x:torch.LongTensor,  # all text tokens
761
+ x_lens:torch.LongTensor,
762
+ prompts:torch.LongTensor,  # reference audio tokens
763
+ bert_feature:torch.LongTensor,
764
+ top_k: int = -100,
765
+ top_p: int = 100,
766
+ early_stop_num: int = -1,
767
+ temperature: float = 1.0,
768
+ repetition_penalty: float = 1.35,
769
+ **kwargs
770
+ ):
771
+ x = self.ar_text_embedding(x)
772
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
773
+ x = self.ar_text_position(x)
774
+
775
+ # AR Decoder
776
+ y = prompts
777
+
778
+ x_len = x.shape[1]
779
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
780
+ stop = False
781
+ # print(1111111,self.num_layers)
782
+
783
+ k_cache = None
784
+ v_cache = None
785
+ ################### first step ##########################
786
+ if y is not None:
787
+ y_emb = self.ar_audio_embedding(y)
788
+ y_len = y_emb.shape[1]
789
+ prefix_len = y.shape[1]
790
+ y_pos = self.ar_audio_position(y_emb)
791
+ xy_pos = torch.concat([x, y_pos], dim=1)
792
+ ref_free = False
793
+ else:
794
+ y_emb = None
795
+ y_len = 0
796
+ prefix_len = 0
797
+ y_pos = None
798
+ xy_pos = x
799
+ y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
800
+ ref_free = True
801
+
802
+ bsz = x.shape[0]
803
+ src_len = x_len + y_len
804
+ x_attn_mask_pad = F.pad(
805
+ x_attn_mask,
806
+ (0, y_len),  # the all-0 xx mask is extended with an all-1 xy block, giving shape (x, x+y)
807
+ value=True,
808
+ )
809
+ y_attn_mask = F.pad(  # the upper-right-1 yy mask is extended with 0s for xy on the left, giving shape (y, x+y)
810
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
811
+ (x_len, 0),
812
+ value=False,
813
+ )
814
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)\
815
+ .unsqueeze(0)\
816
+ .expand(bsz*self.num_head, -1, -1)\
817
+ .view(bsz, self.num_head, src_len, src_len)\
818
+ .to(device=x.device, dtype=torch.bool)
819
+
820
+ for idx in tqdm(range(1500)):
821
+ if xy_attn_mask is not None:
822
+ xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None)
823
+ else:
824
+ xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache)
825
+
826
+ logits = self.ar_predict_layer(
827
+ xy_dec[:, -1]
828
+ )
829
+
830
+ if idx == 0:
831
+ xy_attn_mask = None
832
+ if(idx<11):###至少预测出10个token不然不给停止(0.4s)
833
+ logits = logits[:, :-1]
834
+
835
+ samples = sample(
836
+ logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
837
+ )[0]
838
+
839
+ y = torch.concat([y, samples], dim=1)
840
+
841
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
842
+ print("use early stop num:", early_stop_num)
843
+ stop = True
844
+
845
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
846
+ stop = True
847
+ if stop:
848
+ if y.shape[1] == 0:
849
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
850
+ print("bad zero prediction")
851
+ print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
852
+ break
853
+
854
+ ####################### update next step ###################################
855
+ y_emb = self.ar_audio_embedding(y[:, -1:])
856
+ xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype,device=y_emb.device)
857
+
858
+ if ref_free:
859
+ return y[:, :-1], 0
860
+ return y[:, :-1], idx - 1
861
+
862
+
863
+ def infer_panel(
864
+ self,
865
+ x:torch.LongTensor,  # all text tokens
866
+ x_lens:torch.LongTensor,
867
+ prompts:torch.LongTensor,  # reference audio tokens
868
+ bert_feature:torch.LongTensor,
869
+ top_k: int = -100,
870
+ top_p: int = 100,
871
+ early_stop_num: int = -1,
872
+ temperature: float = 1.0,
873
+ repetition_penalty: float = 1.35,
874
+ **kwargs
875
+ ):
876
+ return self.infer_panel_naive(x, x_lens, prompts, bert_feature, top_k, top_p, early_stop_num, temperature, repetition_penalty, **kwargs)
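For intuition about the masks built in forward/infer_panel_naive above: text positions may attend to all text but to no audio, while audio positions attend to all text and causally to earlier audio. A small standalone sketch with toy sizes follows (not part of the committed code).

# Toy reconstruction of the combined text+audio attention mask (True = masked out).
import torch
import torch.nn.functional as F

x_len, y_len = 3, 4  # 3 text tokens, 4 audio tokens (toy sizes)
x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)   # text rows: hide all audio
y_attn_mask = F.pad(                                           # audio rows: causal over audio,
    torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
    (x_len, 0),
    value=False,                                               # ...with full visibility of text
)
xy_attn_mask = torch.cat([x_attn_mask_pad, y_attn_mask], dim=0)
print(xy_attn_mask.int())  # 7x7: top 3 rows mask the last 4 columns; bottom 4 rows form a causal triangle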
GPT_SoVITS/AR/models/t2s_model_onnx.py ADDED
@@ -0,0 +1,338 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import torch
4
+ from tqdm import tqdm
5
+
6
+ from AR.modules.embedding_onnx import SinePositionalEmbedding
7
+ from AR.modules.embedding_onnx import TokenEmbedding
8
+ from AR.modules.transformer_onnx import LayerNorm
9
+ from AR.modules.transformer_onnx import TransformerEncoder
10
+ from AR.modules.transformer_onnx import TransformerEncoderLayer
11
+ from torch import nn
12
+ from torch.nn import functional as F
13
+ from torchmetrics.classification import MulticlassAccuracy
14
+
15
+ default_config = {
16
+ "embedding_dim": 512,
17
+ "hidden_dim": 512,
18
+ "num_head": 8,
19
+ "num_layers": 12,
20
+ "num_codebook": 8,
21
+ "p_dropout": 0.0,
22
+ "vocab_size": 1024 + 1,
23
+ "phoneme_vocab_size": 512,
24
+ "EOS": 1024,
25
+ }
26
+
27
+ inf_tensor_value = torch.FloatTensor([-float("Inf")]).float()
28
+
29
+ def logits_to_probs(
30
+ logits,
31
+ previous_tokens = None,
32
+ temperature: float = 1.0,
33
+ top_k = None,
34
+ top_p = None,
35
+ repetition_penalty: float = 1.0,
36
+ ):
37
+ previous_tokens = previous_tokens.squeeze()
38
+ if previous_tokens is not None and repetition_penalty != 1.0:
39
+ previous_tokens = previous_tokens.long()
40
+ score = torch.gather(logits, dim=0, index=previous_tokens)
41
+ score = torch.where(
42
+ score < 0, score * repetition_penalty, score / repetition_penalty
43
+ )
44
+ logits.scatter_(dim=0, index=previous_tokens, src=score)
45
+
46
+ if top_p is not None and top_p < 1.0:
47
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
48
+ cum_probs = torch.cumsum(
49
+ torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
50
+ )
51
+ sorted_indices_to_remove = cum_probs > top_p
52
+ sorted_indices_to_remove[0] = False # keep at least one option
53
+ indices_to_remove = sorted_indices_to_remove.scatter(
54
+ dim=0, index=sorted_indices, src=sorted_indices_to_remove
55
+ )
56
+ logits = logits.masked_fill(indices_to_remove, -float("Inf"))
57
+
58
+ logits = logits / max(temperature, 1e-5)
59
+
60
+ if top_k is not None:
61
+ v, _ = torch.topk(logits, top_k)
62
+ pivot = v.select(-1, -1).unsqueeze(-1)
63
+ logits = torch.where(logits < pivot, inf_tensor_value, logits)
64
+
65
+ probs = torch.nn.functional.softmax(logits, dim=-1)
66
+ return probs
67
+
68
+
69
+ def multinomial_sample_one_no_sync(
70
+ probs_sort
71
+ ): # Does multinomial sampling without a cuda synchronization
72
+ q = torch.randn_like(probs_sort)
73
+ return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
74
+
75
+
76
+ def sample(
77
+ logits,
78
+ previous_tokens,
79
+ **sampling_kwargs,
80
+ ):
81
+ probs = logits_to_probs(
82
+ logits=logits, previous_tokens=previous_tokens, **sampling_kwargs
83
+ )
84
+ idx_next = multinomial_sample_one_no_sync(probs)
85
+ return idx_next, probs
86
+
87
+
88
+ class OnnxEncoder(nn.Module):
89
+ def __init__(self, ar_text_embedding, bert_proj, ar_text_position):
90
+ super().__init__()
91
+ self.ar_text_embedding = ar_text_embedding
92
+ self.bert_proj = bert_proj
93
+ self.ar_text_position = ar_text_position
94
+
95
+ def forward(self, x, bert_feature):
96
+ x = self.ar_text_embedding(x)
97
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
98
+ return self.ar_text_position(x)
99
+
100
+
101
+ class T2SFirstStageDecoder(nn.Module):
102
+ def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric,
103
+ top_k, early_stop_num, num_layers):
104
+ super().__init__()
105
+ self.ar_audio_embedding = ar_audio_embedding
106
+ self.ar_audio_position = ar_audio_position
107
+ self.h = h
108
+ self.ar_predict_layer = ar_predict_layer
109
+ self.loss_fct = loss_fct
110
+ self.ar_accuracy_metric = ar_accuracy_metric
111
+ self.top_k = top_k
112
+ self.early_stop_num = early_stop_num
113
+ self.num_layers = num_layers
114
+
115
+ def forward(self, x, prompt):
116
+ y = prompt
117
+ x_example = x[:,:,0] * 0.0
118
+ #N, 1, 512
119
+ cache = {
120
+ "all_stage": self.num_layers,
121
+ "k": None,
122
+ "v": None,
123
+ "y_emb": None,
124
+ "first_infer": 1,
125
+ "stage": 0,
126
+ }
127
+
128
+ y_emb = self.ar_audio_embedding(y)
129
+
130
+ cache["y_emb"] = y_emb
131
+ y_pos = self.ar_audio_position(y_emb)
132
+
133
+ xy_pos = torch.concat([x, y_pos], dim=1)
134
+
135
+ y_example = y_pos[:,:,0] * 0.0
136
+ x_attn_mask = torch.matmul(x_example.transpose(0, 1) , x_example).bool()
137
+ y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64)
138
+ y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum(
139
+ torch.ones_like(y_example.transpose(0, 1), dtype=torch.int64), dim=0
140
+ )
141
+ y_attn_mask = y_attn_mask > 0
142
+
143
+ x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool()
144
+ y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool()
145
+ x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1)
146
+ y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1)
147
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
148
+ cache["k"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\
149
+ .unsqueeze(1).repeat(self.num_layers, 1, 1, 1)
150
+ cache["v"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\
151
+ .unsqueeze(1).repeat(self.num_layers, 1, 1, 1)
152
+
153
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
154
+ logits = self.ar_predict_layer(xy_dec[:, -1])
155
+ samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
156
+
157
+ y = torch.concat([y, samples], dim=1)
158
+
159
+ return y, cache["k"], cache["v"], cache["y_emb"], x_example
160
+
161
+
162
+ class T2SStageDecoder(nn.Module):
163
+ def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric,
164
+ top_k, early_stop_num, num_layers):
165
+ super().__init__()
166
+ self.ar_audio_embedding = ar_audio_embedding
167
+ self.ar_audio_position = ar_audio_position
168
+ self.h = h
169
+ self.ar_predict_layer = ar_predict_layer
170
+ self.loss_fct = loss_fct
171
+ self.ar_accuracy_metric = ar_accuracy_metric
172
+ self.top_k = top_k
173
+ self.early_stop_num = early_stop_num
174
+ self.num_layers = num_layers
175
+
176
+ def forward(self, y, k, v, y_emb, x_example):
177
+ cache = {
178
+ "all_stage": self.num_layers,
179
+ "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)),
180
+ "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)),
181
+ "y_emb": y_emb,
182
+ "first_infer": 0,
183
+ "stage": 0,
184
+ }
185
+
186
+ y_emb = torch.cat(
187
+ [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
188
+ )
189
+ cache["y_emb"] = y_emb
190
+ y_pos = self.ar_audio_position(y_emb)
191
+
192
+ xy_pos = y_pos[:, -1:]
193
+
194
+ y_example = y_pos[:,:,0] * 0.0
195
+
196
+ xy_attn_mask = torch.cat([x_example, y_example], dim=1)
197
+ xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool)
198
+
199
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
200
+ logits = self.ar_predict_layer(xy_dec[:, -1])
201
+ samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
202
+
203
+ y = torch.concat([y, samples], dim=1)
204
+
205
+ return y, cache["k"], cache["v"], cache["y_emb"], logits, samples
206
+
207
+
208
+ class Text2SemanticDecoder(nn.Module):
209
+ def __init__(self, config, norm_first=False, top_k=3):
210
+ super(Text2SemanticDecoder, self).__init__()
211
+ self.model_dim = config["model"]["hidden_dim"]
212
+ self.embedding_dim = config["model"]["embedding_dim"]
213
+ self.num_head = config["model"]["head"]
214
+ self.num_layers = config["model"]["n_layer"]
215
+ self.norm_first = norm_first
216
+ self.vocab_size = config["model"]["vocab_size"]
217
+ self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
218
+ self.p_dropout = float(config["model"]["dropout"])
219
+ self.EOS = config["model"]["EOS"]
220
+ self.norm_first = norm_first
221
+ assert self.EOS == self.vocab_size - 1
222
+ self.bert_proj = nn.Linear(1024, self.embedding_dim)
223
+ self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout)
224
+ self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
225
+ self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout)
226
+ self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
227
+ self.h = TransformerEncoder(
228
+ TransformerEncoderLayer(
229
+ d_model=self.model_dim,
230
+ nhead=self.num_head,
231
+ dim_feedforward=self.model_dim * 4,
232
+ dropout=0.1,
233
+ batch_first=True,
234
+ norm_first=norm_first,
235
+ ),
236
+ num_layers=self.num_layers,
237
+ norm=LayerNorm(self.model_dim) if norm_first else None,
238
+ )
239
+ self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
240
+ self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
241
+ self.ar_accuracy_metric = MulticlassAccuracy(
242
+ self.vocab_size,
243
+ top_k=top_k,
244
+ average="micro",
245
+ multidim_average="global",
246
+ ignore_index=self.EOS,
247
+ )
248
+ self.top_k = torch.LongTensor([1])
249
+ self.early_stop_num = torch.LongTensor([-1])
250
+
251
+ def init_onnx(self):
252
+ self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position)
253
+ self.first_stage_decoder = T2SFirstStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h,
254
+ self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num,
255
+ self.num_layers)
256
+ self.stage_decoder = T2SStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h,
257
+ self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num,
258
+ self.num_layers)
259
+
260
+ def forward(self, x, prompts, bert_feature):
261
+ early_stop_num = self.early_stop_num
262
+ prefix_len = prompts.shape[1]
263
+
264
+ x = self.onnx_encoder(x, bert_feature)
265
+ y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)
266
+
267
+ stop = False
268
+ for idx in range(1, 1500):
269
+ enco = self.stage_decoder(y, k, v, y_emb, x_example)
270
+ y, k, v, y_emb, logits, samples = enco
271
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
272
+ stop = True
273
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
274
+ stop = True
275
+ if stop:
276
+ break
277
+ y[0, -1] = 0
278
+ return y, idx
279
+
280
+ def infer(self, x, prompts, bert_feature):
281
+ top_k = self.top_k
282
+ early_stop_num = self.early_stop_num
283
+
284
+ x = self.onnx_encoder(x, bert_feature)
285
+
286
+ y = prompts
287
+ prefix_len = y.shape[1]
288
+ x_len = x.shape[1]
289
+ x_example = x[:,:,0] * 0.0
290
+ x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example)
291
+ x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool)
292
+
293
+ stop = False
294
+ cache = {
295
+ "all_stage": self.num_layers,
296
+ "k": [None] * self.num_layers,
297
+ "v": [None] * self.num_layers,
298
+ "y_emb": None,
299
+ "first_infer": 1,
300
+ "stage": 0,
301
+ }
302
+ for idx in range(1500):
303
+ if cache["first_infer"] == 1:
304
+ y_emb = self.ar_audio_embedding(y)
305
+ else:
306
+ y_emb = torch.cat(
307
+ [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
308
+ )
309
+ cache["y_emb"] = y_emb
310
+ y_pos = self.ar_audio_position(y_emb)
311
+ if cache["first_infer"] == 1:
312
+ xy_pos = torch.concat([x, y_pos], dim=1)
313
+ else:
314
+ xy_pos = y_pos[:, -1:]
315
+ y_len = y_pos.shape[1]
316
+ if cache["first_infer"] == 1:
317
+ x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)
318
+ y_attn_mask = F.pad(
319
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
320
+ (x_len, 0), value=False
321
+ )
322
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
323
+ else:
324
+ xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool)
325
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
326
+ logits = self.ar_predict_layer(xy_dec[:, -1])
327
+ samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
328
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
329
+ stop = True
330
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
331
+ stop = True
332
+ if stop:
333
+ if prompts.shape[1] == y.shape[1]:
334
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
335
+ break
336
+ y = torch.concat([y, samples], dim=1)
337
+ cache["first_infer"] = 0
338
+ return y, idx
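The three sub-modules above (onnx_encoder, first_stage_decoder, stage_decoder) are meant to be exported and driven separately: encode the phoneme/BERT inputs once, seed the KV cache with a single first-stage step, then loop the stage decoder one token at a time until EOS. A minimal driver sketch, assuming `t2s` is a Text2SemanticDecoder on which init_onnx() has already been called and `phones`, `bert_feature`, `prompts` are pre-built tensors with the shapes the encoder expects:

import torch

def decode_sketch(t2s, phones, bert_feature, prompts, max_steps=1500):
    x = t2s.onnx_encoder(phones, bert_feature)                       # text encoding
    y, k, v, y_emb, x_example = t2s.first_stage_decoder(x, prompts)  # prefill / seed the KV cache
    for _ in range(1, max_steps):
        y, k, v, y_emb, logits, samples = t2s.stage_decoder(y, k, v, y_emb, x_example)
        if torch.argmax(logits, dim=-1)[0] == t2s.EOS or samples[0, 0] == t2s.EOS:
            break                                                    # stop once EOS is produced
    return y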
GPT_SoVITS/AR/models/utils.py ADDED
@@ -0,0 +1,229 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from typing import Tuple
6
+
7
+ def sequence_mask(length, max_length=None):
8
+ if max_length is None:
9
+ max_length = length.max()
10
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
11
+ return x.unsqueeze(0) < length.unsqueeze(1)
12
+
13
+
14
+ def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
15
+ """
16
+ Args:
17
+ lengths:
18
+ A 1-D tensor containing sentence lengths.
19
+ max_len:
20
+ The length of masks.
21
+ Returns:
22
+ Return a 2-D bool tensor, where masked positions
23
+ are filled with `True` and non-masked positions are
24
+ filled with `False`.
25
+
26
+ #>>> lengths = torch.tensor([1, 3, 2, 5])
27
+ #>>> make_pad_mask(lengths)
28
+ tensor([[False, True, True, True, True],
29
+ [False, False, False, True, True],
30
+ [False, False, True, True, True],
31
+ [False, False, False, False, False]])
32
+ """
33
+ assert lengths.ndim == 1, lengths.ndim
34
+ max_len = max(max_len, lengths.max())
35
+ n = lengths.size(0)
36
+ seq_range = torch.arange(0, max_len, device=lengths.device)
37
+ expanded_lengths = seq_range.unsqueeze(0).expand(n, max_len)
38
+
39
+ return expanded_lengths >= lengths.unsqueeze(-1)
40
+
41
+
42
+ # https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
43
+ def top_k_top_p_filtering(
44
+ logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1
45
+ ):
46
+ """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
47
+ Args:
48
+ logits: logits distribution shape (batch size, vocabulary size)
49
+ if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
50
+ if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
51
+ Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
52
+ Make sure we keep at least min_tokens_to_keep per batch example in the output
53
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
54
+ """
55
+ if top_k > 0:
56
+ top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
57
+ # Remove all tokens with a probability less than the last token of the top-k
58
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
59
+ logits[indices_to_remove] = filter_value
60
+
61
+ if top_p < 1.0:
62
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
63
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
64
+
65
+ # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
66
+ sorted_indices_to_remove = cumulative_probs > top_p
67
+ if min_tokens_to_keep > 1:
68
+ # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
69
+ sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
70
+ # Shift the indices to the right to keep also the first token above the threshold
71
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
72
+ sorted_indices_to_remove[..., 0] = 0
73
+
74
+ # scatter sorted tensors to original indexing
75
+ indices_to_remove = sorted_indices_to_remove.scatter(
76
+ 1, sorted_indices, sorted_indices_to_remove
77
+ )
78
+ logits[indices_to_remove] = filter_value
79
+ return logits
80
+
81
+
82
+ def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
83
+ # temperature: (`optional`) float
84
+ # The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
85
+ # top_k: (`optional`) int
86
+ # The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
87
+ # top_p: (`optional`) float
88
+ # The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
89
+
90
+ # Temperature (higher temperature => more likely to sample low probability tokens)
91
+ if temperature != 1.0:
92
+ logits = logits / temperature
93
+ # Top-p/top-k filtering
94
+ logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
95
+ # Sample
96
+ token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
97
+ return token
98
+
99
+
100
+ from typing import Optional, Tuple
101
+
102
+
103
+ def multinomial_sample_one_no_sync(
104
+ probs_sort,
105
+ ): # Does multinomial sampling without a cuda synchronization
106
+ q = torch.empty_like(probs_sort).exponential_(1)
107
+ return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
108
+
109
+
110
+ def logits_to_probs(
111
+ logits,
112
+ previous_tokens: Optional[torch.Tensor] = None,
113
+ temperature: float = 1.0,
114
+ top_k: Optional[int] = None,
115
+ top_p: Optional[int] = None,
116
+ repetition_penalty: float = 1.0,
117
+ ):
118
+ # if previous_tokens is not None:
119
+ # previous_tokens = previous_tokens.squeeze()
120
+ # print(logits.shape,previous_tokens.shape)
121
+ # pdb.set_trace()
122
+ if previous_tokens is not None and repetition_penalty != 1.0:
123
+ previous_tokens = previous_tokens.long()
124
+ score = torch.gather(logits, dim=1, index=previous_tokens)
125
+ score = torch.where(
126
+ score < 0, score * repetition_penalty, score / repetition_penalty
127
+ )
128
+ logits.scatter_(dim=1, index=previous_tokens, src=score)
129
+
130
+ if top_p is not None and top_p < 1.0:
131
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
132
+ cum_probs = torch.cumsum(
133
+ torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
134
+ )
135
+ sorted_indices_to_remove = cum_probs > top_p
136
+ sorted_indices_to_remove[:, 0] = False # keep at least one option
137
+ indices_to_remove = sorted_indices_to_remove.scatter(
138
+ dim=1, index=sorted_indices, src=sorted_indices_to_remove
139
+ )
140
+ logits = logits.masked_fill(indices_to_remove, -float("Inf"))
141
+
142
+ logits = logits / max(temperature, 1e-5)
143
+
144
+ if top_k is not None:
145
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
146
+ pivot = v[: , -1].unsqueeze(-1)
147
+ logits = torch.where(logits < pivot, -float("Inf"), logits)
148
+
149
+ probs = torch.nn.functional.softmax(logits, dim=-1)
150
+ return probs
151
+
152
+
153
+ def sample(
154
+ logits,
155
+ previous_tokens: Optional[torch.Tensor] = None,
156
+ **sampling_kwargs,
157
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
158
+ probs = logits_to_probs(
159
+ logits=logits, previous_tokens=previous_tokens, **sampling_kwargs
160
+ )
161
+ idx_next = multinomial_sample_one_no_sync(probs)
162
+ return idx_next, probs
163
+
164
+ def dpo_loss(policy_chosen_logps: torch.FloatTensor,
165
+ policy_rejected_logps: torch.FloatTensor,
166
+ reference_chosen_logps: torch.FloatTensor,
167
+ reference_rejected_logps: torch.FloatTensor,
168
+ beta: float,
169
+ reference_free: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
170
+ pi_logratios = policy_chosen_logps - policy_rejected_logps
171
+ ref_logratios = reference_chosen_logps - reference_rejected_logps
172
+
173
+ if reference_free:
174
+ ref_logratios = 0
175
+
176
+ logits = pi_logratios - ref_logratios
177
+
178
+ losses = -F.logsigmoid(beta * logits)
179
+ chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
180
+ rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()
181
+
182
+ return losses.mean(), chosen_rewards, rejected_rewards
183
+
184
+ def get_batch_logps(logits_target: torch.FloatTensor, logits_reject: torch.FloatTensor, labels_target: torch.LongTensor, labels_reject: torch.LongTensor, average_log_prob: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
185
+
186
+ # dummy token; we'll ignore the losses on these tokens later
187
+
188
+ per_token_logps_target = torch.gather(logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)).squeeze(2)
189
+ per_token_logps_reject = torch.gather(logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)).squeeze(2)
190
+
191
+ return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)
192
+
193
+ def make_reject_y(y_o, y_lens):
194
+ def repeat_P(y):
195
+ range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
196
+ pre = y[:range_idx[0]]
197
+ shf = y[range_idx[1]:]
198
+ range_text = y[range_idx[0]:range_idx[1]]
199
+ new_y = torch.cat([pre, range_text, range_text, shf])
200
+ return new_y
201
+ def lost_P(y):
202
+ range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
203
+ pre = y[:range_idx[0]]
204
+ shf = y[range_idx[1]:]
205
+ range_text = y[range_idx[0]:range_idx[1]]
206
+ new_y = torch.cat([pre, shf])
207
+ return new_y
208
+ bs = len(y_lens)
209
+ reject_y = []
210
+ reject_y_lens = []
211
+ for b in range(bs):
212
+ process_item_idx = torch.randint(0, 1, size=(1, ))[0]
213
+ if process_item_idx == 0:
214
+ new_y = repeat_P(y_o[b])
215
+ reject_y.append(new_y)
216
+ reject_y_lens.append(len(new_y))
217
+ elif process_item_idx==1:
218
+ new_y = lost_P(y_o[b])
219
+ reject_y.append(new_y)
220
+ reject_y_lens.append(len(new_y))
221
+ max_length = max(reject_y_lens)
222
+ for b in range(bs):
223
+ pad_length = max_length - reject_y_lens[b]
224
+ reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)
225
+
226
+ reject_y = torch.stack(reject_y, dim = 0)
227
+ reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)
228
+
229
+ return reject_y, reject_y_lens
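As a quick illustration of the sampling helpers above, the snippet below runs sample() on dummy data; the vocabulary size, history length, and hyper-parameter values are illustrative assumptions, not values taken from the model:

import torch
# assumes sample() is importable from this module, e.g. `from AR.models.utils import sample`

vocab = 1025
logits = torch.randn(1, vocab)               # next-token logits, shape (batch=1, vocab)
history = torch.randint(0, vocab, (1, 32))   # previously generated ids, shape (1, T)

token, probs = sample(logits, previous_tokens=history,
                      top_k=5, top_p=1.0, repetition_penalty=1.35)
print(token.shape, probs.shape)              # (1, 1) sampled id, (1, vocab) filtered distribution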
GPT_SoVITS/AR/modules/__init__.py ADDED
File without changes
GPT_SoVITS/AR/modules/activation.py ADDED
@@ -0,0 +1,428 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
2
+ from typing import Optional
3
+ from typing import Tuple
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import Linear
7
+ from torch.nn import Module
8
+ from torch.nn.init import constant_
9
+ from torch.nn.init import xavier_normal_
10
+ from torch.nn.init import xavier_uniform_
11
+ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
12
+ from torch.nn.parameter import Parameter
13
+
14
+ from torch.nn import functional as F
15
+ from AR.modules.patched_mha_with_cache import multi_head_attention_forward_patched
16
+
17
+ F.multi_head_attention_forward = multi_head_attention_forward_patched
18
+
19
+
20
+ class MultiheadAttention(Module):
21
+ r"""Allows the model to jointly attend to information
22
+ from different representation subspaces as described in the paper:
23
+ `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
24
+
25
+ Multi-Head Attention is defined as:
26
+
27
+ .. math::
28
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
29
+
30
+ where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
31
+
32
+ ``forward()`` will use a special optimized implementation if all of the following
33
+ conditions are met:
34
+
35
+ - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
36
+ restriction will be loosened in the future.)
37
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
38
+ - training is disabled (using ``.eval()``)
39
+ - dropout is 0
40
+ - ``add_bias_kv`` is ``False``
41
+ - ``add_zero_attn`` is ``False``
42
+ - ``batch_first`` is ``True`` and the input is batched
43
+ - ``kdim`` and ``vdim`` are equal to ``embed_dim``
44
+ - at most one of ``key_padding_mask`` or ``attn_mask`` is passed
45
+ - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
46
+ nor ``attn_mask`` is passed
47
+
48
+ If the optimized implementation is in use, a
49
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
50
+ ``query``/``key``/``value`` to represent padding more efficiently than using a
51
+ padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
52
+ will be returned, and an additional speedup proportional to the fraction of the input
53
+ that is padding can be expected.
54
+
55
+ Args:
56
+ embed_dim: Total dimension of the model.
57
+ num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
58
+ across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
59
+ dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
60
+ bias: If specified, adds bias to input / output projection layers. Default: ``True``.
61
+ add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
62
+ add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
63
+ Default: ``False``.
64
+ kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
65
+ vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
66
+ batch_first: If ``True``, then the input and output tensors are provided
67
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
68
+
69
+ Examples::
70
+
71
+ >>> # xdoctest: +SKIP
72
+ >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
73
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
74
+
75
+ """
76
+ __constants__ = ["batch_first"]
77
+ bias_k: Optional[torch.Tensor]
78
+ bias_v: Optional[torch.Tensor]
79
+
80
+ def __init__(
81
+ self,
82
+ embed_dim,
83
+ num_heads,
84
+ dropout=0.0,
85
+ bias=True,
86
+ add_bias_kv=False,
87
+ add_zero_attn=False,
88
+ kdim=None,
89
+ vdim=None,
90
+ batch_first=False,
91
+ linear1_cls=Linear,
92
+ linear2_cls=Linear,
93
+ device=None,
94
+ dtype=None,
95
+ ) -> None:
96
+ factory_kwargs = {"device": device, "dtype": dtype}
97
+ super(MultiheadAttention, self).__init__()
98
+ self.embed_dim = embed_dim
99
+ self.kdim = kdim if kdim is not None else embed_dim
100
+ self.vdim = vdim if vdim is not None else embed_dim
101
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
102
+
103
+ self.num_heads = num_heads
104
+ self.dropout = dropout
105
+ self.batch_first = batch_first
106
+ self.head_dim = embed_dim // num_heads
107
+ assert (
108
+ self.head_dim * num_heads == self.embed_dim
109
+ ), "embed_dim must be divisible by num_heads"
110
+
111
+ if add_bias_kv:
112
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
113
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
114
+ else:
115
+ self.bias_k = self.bias_v = None
116
+
117
+ if linear1_cls == Linear:
118
+ if not self._qkv_same_embed_dim:
119
+ self.q_proj_weight = Parameter(
120
+ torch.empty((embed_dim, embed_dim), **factory_kwargs)
121
+ )
122
+ self.k_proj_weight = Parameter(
123
+ torch.empty((embed_dim, self.kdim), **factory_kwargs)
124
+ )
125
+ self.v_proj_weight = Parameter(
126
+ torch.empty((embed_dim, self.vdim), **factory_kwargs)
127
+ )
128
+ self.register_parameter("in_proj_weight", None)
129
+ else:
130
+ self.in_proj_weight = Parameter(
131
+ torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
132
+ )
133
+ self.register_parameter("q_proj_weight", None)
134
+ self.register_parameter("k_proj_weight", None)
135
+ self.register_parameter("v_proj_weight", None)
136
+
137
+ if bias:
138
+ self.in_proj_bias = Parameter(
139
+ torch.empty(3 * embed_dim, **factory_kwargs)
140
+ )
141
+ else:
142
+ self.register_parameter("in_proj_bias", None)
143
+ self.out_proj = NonDynamicallyQuantizableLinear(
144
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
145
+ )
146
+
147
+ self._reset_parameters()
148
+ else:
149
+ if not self._qkv_same_embed_dim:
150
+ raise NotImplementedError
151
+ else:
152
+ self.in_proj_linear = linear1_cls(
153
+ embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs
154
+ )
155
+ self.in_proj_weight = self.in_proj_linear.weight
156
+
157
+ self.register_parameter("q_proj_weight", None)
158
+ self.register_parameter("k_proj_weight", None)
159
+ self.register_parameter("v_proj_weight", None)
160
+
161
+ if bias:
162
+ self.in_proj_bias = self.in_proj_linear.bias
163
+ else:
164
+ self.register_parameter("in_proj_bias", None)
165
+
166
+ self.out_proj = linear2_cls(
167
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
168
+ )
169
+
170
+ if self.bias_k is not None:
171
+ xavier_normal_(self.bias_k)
172
+ if self.bias_v is not None:
173
+ xavier_normal_(self.bias_v)
174
+
175
+ self.add_zero_attn = add_zero_attn
176
+
177
+ def _reset_parameters(self):
178
+ if self._qkv_same_embed_dim:
179
+ xavier_uniform_(self.in_proj_weight)
180
+ else:
181
+ xavier_uniform_(self.q_proj_weight)
182
+ xavier_uniform_(self.k_proj_weight)
183
+ xavier_uniform_(self.v_proj_weight)
184
+
185
+ if self.in_proj_bias is not None:
186
+ constant_(self.in_proj_bias, 0.0)
187
+ constant_(self.out_proj.bias, 0.0)
188
+
189
+ if self.bias_k is not None:
190
+ xavier_normal_(self.bias_k)
191
+ if self.bias_v is not None:
192
+ xavier_normal_(self.bias_v)
193
+
194
+ def __setstate__(self, state):
195
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
196
+ if "_qkv_same_embed_dim" not in state:
197
+ state["_qkv_same_embed_dim"] = True
198
+
199
+ super(MultiheadAttention, self).__setstate__(state)
200
+
201
+ def forward(
202
+ self,
203
+ query: Tensor,
204
+ key: Tensor,
205
+ value: Tensor,
206
+ key_padding_mask: Optional[Tensor] = None,
207
+ need_weights: bool = True,
208
+ attn_mask: Optional[Tensor] = None,
209
+ average_attn_weights: bool = True,
210
+ cache=None,
211
+ ) -> Tuple[Tensor, Optional[Tensor]]:
212
+ r"""
213
+ Args:
214
+ query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
215
+ or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
216
+ :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
217
+ Queries are compared against key-value pairs to produce the output.
218
+ See "Attention Is All You Need" for more details.
219
+ key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
220
+ or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
221
+ :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
222
+ See "Attention Is All You Need" for more details.
223
+ value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
224
+ ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
225
+ sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
226
+ See "Attention Is All You Need" for more details.
227
+ key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
228
+ to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
229
+ Binary and byte masks are supported.
230
+ For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
231
+ the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
232
+ need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
233
+ Default: ``True``.
234
+ attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
235
+ :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
236
+ :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
237
+ broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
238
+ Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
239
+ corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
240
+ corresponding position is not allowed to attend. For a float mask, the mask values will be added to
241
+ the attention weight.
242
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
243
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
244
+ effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
245
+
246
+ Outputs:
247
+ - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
248
+ :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
249
+ where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
250
+ embedding dimension ``embed_dim``.
251
+ - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
252
+ returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
253
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
254
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
255
+ head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
256
+
257
+ .. note::
258
+ `batch_first` argument is ignored for unbatched inputs.
259
+ """
260
+ is_batched = query.dim() == 3
261
+ if key_padding_mask is not None:
262
+ _kpm_dtype = key_padding_mask.dtype
263
+ if _kpm_dtype != torch.bool and not torch.is_floating_point(
264
+ key_padding_mask
265
+ ):
266
+ raise AssertionError(
267
+ "only bool and floating types of key_padding_mask are supported"
268
+ )
269
+ why_not_fast_path = ""
270
+ if not is_batched:
271
+ why_not_fast_path = (
272
+ f"input not batched; expected query.dim() of 3 but got {query.dim()}"
273
+ )
274
+ elif query is not key or key is not value:
275
+ # When lifting this restriction, don't forget to either
276
+ # enforce that the dtypes all match or test cases where
277
+ # they don't!
278
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
279
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
280
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
281
+ elif (
282
+ self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype
283
+ ):
284
+ # this case will fail anyway, but at least they'll get a useful error message.
285
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
286
+ elif self.training:
287
+ why_not_fast_path = "training is enabled"
288
+ elif not self.batch_first:
289
+ why_not_fast_path = "batch_first was not True"
290
+ elif self.bias_k is not None:
291
+ why_not_fast_path = "self.bias_k was not None"
292
+ elif self.bias_v is not None:
293
+ why_not_fast_path = "self.bias_v was not None"
294
+ elif self.dropout:
295
+ why_not_fast_path = f"dropout was {self.dropout}, required zero"
296
+ elif self.add_zero_attn:
297
+ why_not_fast_path = "add_zero_attn was enabled"
298
+ elif not self._qkv_same_embed_dim:
299
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
300
+ elif attn_mask is not None:
301
+ why_not_fast_path = "attn_mask was not None"
302
+ elif query.is_nested and key_padding_mask is not None:
303
+ why_not_fast_path = (
304
+ "key_padding_mask is not supported with NestedTensor input"
305
+ )
306
+ elif self.num_heads % 2 == 1:
307
+ why_not_fast_path = "num_heads is odd"
308
+ elif torch.is_autocast_enabled():
309
+ why_not_fast_path = "autocast is enabled"
310
+
311
+ if not why_not_fast_path:
312
+ tensor_args = (
313
+ query,
314
+ key,
315
+ value,
316
+ self.in_proj_weight,
317
+ self.in_proj_bias,
318
+ self.out_proj.weight,
319
+ self.out_proj.bias,
320
+ )
321
+ # We have to use list comprehensions below because TorchScript does not support
322
+ # generator expressions.
323
+ if torch.overrides.has_torch_function(tensor_args):
324
+ why_not_fast_path = "some Tensor argument has_torch_function"
325
+ elif not all(
326
+ [
327
+ (x is None or x.is_cuda or "cpu" in str(x.device))
328
+ for x in tensor_args
329
+ ]
330
+ ):
331
+ why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
332
+ elif torch.is_grad_enabled() and any(
333
+ [x is not None and x.requires_grad for x in tensor_args]
334
+ ):
335
+ why_not_fast_path = (
336
+ "grad is enabled and at least one of query or the "
337
+ "input/output projection weights or biases requires_grad"
338
+ )
339
+ if not why_not_fast_path:
340
+ return torch._native_multi_head_attention(
341
+ query,
342
+ key,
343
+ value,
344
+ self.embed_dim,
345
+ self.num_heads,
346
+ self.in_proj_weight,
347
+ self.in_proj_bias,
348
+ self.out_proj.weight,
349
+ self.out_proj.bias,
350
+ key_padding_mask if key_padding_mask is not None else attn_mask,
351
+ need_weights,
352
+ average_attn_weights,
353
+ 1
354
+ if key_padding_mask is not None
355
+ else 0
356
+ if attn_mask is not None
357
+ else None,
358
+ )
359
+
360
+ any_nested = query.is_nested or key.is_nested or value.is_nested
361
+ assert not any_nested, (
362
+ "MultiheadAttention does not support NestedTensor outside of its fast path. "
363
+ + f"The fast path was not hit because {why_not_fast_path}"
364
+ )
365
+
366
+ if self.batch_first and is_batched:
367
+ # make sure that the transpose op does not affect the "is" property
368
+ if key is value:
369
+ if query is key:
370
+ query = key = value = query.transpose(1, 0)
371
+ else:
372
+ query, key = [x.transpose(1, 0) for x in (query, key)]
373
+ value = key
374
+ else:
375
+ query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
376
+
377
+ if not self._qkv_same_embed_dim:
378
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
379
+ query,
380
+ key,
381
+ value,
382
+ self.embed_dim,
383
+ self.num_heads,
384
+ self.in_proj_weight,
385
+ self.in_proj_bias,
386
+ self.bias_k,
387
+ self.bias_v,
388
+ self.add_zero_attn,
389
+ self.dropout,
390
+ self.out_proj.weight,
391
+ self.out_proj.bias,
392
+ training=self.training,
393
+ key_padding_mask=key_padding_mask,
394
+ need_weights=need_weights,
395
+ attn_mask=attn_mask,
396
+ use_separate_proj_weight=True,
397
+ q_proj_weight=self.q_proj_weight,
398
+ k_proj_weight=self.k_proj_weight,
399
+ v_proj_weight=self.v_proj_weight,
400
+ average_attn_weights=average_attn_weights,
401
+ cache=cache,
402
+ )
403
+ else:
404
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
405
+ query,
406
+ key,
407
+ value,
408
+ self.embed_dim,
409
+ self.num_heads,
410
+ self.in_proj_weight,
411
+ self.in_proj_bias,
412
+ self.bias_k,
413
+ self.bias_v,
414
+ self.add_zero_attn,
415
+ self.dropout,
416
+ self.out_proj.weight,
417
+ self.out_proj.bias,
418
+ training=self.training,
419
+ key_padding_mask=key_padding_mask,
420
+ need_weights=need_weights,
421
+ attn_mask=attn_mask,
422
+ average_attn_weights=average_attn_weights,
423
+ cache=cache,
424
+ )
425
+ if self.batch_first and is_batched:
426
+ return attn_output.transpose(1, 0), attn_output_weights
427
+ else:
428
+ return attn_output, attn_output_weights
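This class is a drop-in variant of nn.MultiheadAttention whose non-fast path routes through the patched multi_head_attention_forward, so an optional `cache` dict can carry incremental K/V state during autoregressive decoding. A minimal self-attention sketch, assuming the patched forward accepts cache=None (its default here) and using illustrative shapes:

import torch

mha = MultiheadAttention(embed_dim=512, num_heads=8, batch_first=True)
x = torch.randn(2, 10, 512)                            # (batch, seq, embed_dim)
out, _ = mha(x, x, x, need_weights=False, cache=None)  # self-attention, no KV cache
print(out.shape)                                       # torch.Size([2, 10, 512])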
GPT_SoVITS/AR/modules/activation_onnx.py ADDED
@@ -0,0 +1,178 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
2
+ from typing import Optional
3
+ from typing import Tuple
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import Linear
7
+ from torch.nn import Module
8
+ from torch.nn.init import constant_
9
+ from torch.nn.init import xavier_normal_
10
+ from torch.nn.init import xavier_uniform_
11
+ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
12
+ from torch.nn.parameter import Parameter
13
+
14
+ from torch.nn import functional as F
15
+ from AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched
16
+
17
+
18
+ class MultiheadAttention(Module):
19
+ __constants__ = ["batch_first"]
20
+ bias_k: Optional[torch.Tensor]
21
+ bias_v: Optional[torch.Tensor]
22
+
23
+ def __init__(
24
+ self,
25
+ embed_dim,
26
+ num_heads,
27
+ dropout=0.0,
28
+ bias=True,
29
+ add_bias_kv=False,
30
+ add_zero_attn=False,
31
+ kdim=None,
32
+ vdim=None,
33
+ batch_first=False,
34
+ linear1_cls=Linear,
35
+ linear2_cls=Linear,
36
+ device=None,
37
+ dtype=None,
38
+ ) -> None:
39
+ factory_kwargs = {"device": device, "dtype": dtype}
40
+ super(MultiheadAttention, self).__init__()
41
+ self.embed_dim = embed_dim
42
+ self.kdim = kdim if kdim is not None else embed_dim
43
+ self.vdim = vdim if vdim is not None else embed_dim
44
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
45
+
46
+ self.num_heads = num_heads
47
+ self.dropout = dropout
48
+ self.batch_first = batch_first
49
+ self.head_dim = embed_dim // num_heads
50
+ assert (
51
+ self.head_dim * num_heads == self.embed_dim
52
+ ), "embed_dim must be divisible by num_heads"
53
+
54
+ if add_bias_kv:
55
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
56
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
57
+ else:
58
+ self.bias_k = self.bias_v = None
59
+
60
+ if linear1_cls == Linear:
61
+ if not self._qkv_same_embed_dim:
62
+ self.q_proj_weight = Parameter(
63
+ torch.empty((embed_dim, embed_dim), **factory_kwargs)
64
+ )
65
+ self.k_proj_weight = Parameter(
66
+ torch.empty((embed_dim, self.kdim), **factory_kwargs)
67
+ )
68
+ self.v_proj_weight = Parameter(
69
+ torch.empty((embed_dim, self.vdim), **factory_kwargs)
70
+ )
71
+ self.register_parameter("in_proj_weight", None)
72
+ else:
73
+ self.in_proj_weight = Parameter(
74
+ torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
75
+ )
76
+ self.register_parameter("q_proj_weight", None)
77
+ self.register_parameter("k_proj_weight", None)
78
+ self.register_parameter("v_proj_weight", None)
79
+
80
+ if bias:
81
+ self.in_proj_bias = Parameter(
82
+ torch.empty(3 * embed_dim, **factory_kwargs)
83
+ )
84
+ else:
85
+ self.register_parameter("in_proj_bias", None)
86
+ self.out_proj = NonDynamicallyQuantizableLinear(
87
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
88
+ )
89
+
90
+ self._reset_parameters()
91
+ else:
92
+ if not self._qkv_same_embed_dim:
93
+ raise NotImplementedError
94
+ else:
95
+ self.in_proj_linear = linear1_cls(
96
+ embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs
97
+ )
98
+ self.in_proj_weight = self.in_proj_linear.weight
99
+
100
+ self.register_parameter("q_proj_weight", None)
101
+ self.register_parameter("k_proj_weight", None)
102
+ self.register_parameter("v_proj_weight", None)
103
+
104
+ if bias:
105
+ self.in_proj_bias = self.in_proj_linear.bias
106
+ else:
107
+ self.register_parameter("in_proj_bias", None)
108
+
109
+ self.out_proj = linear2_cls(
110
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
111
+ )
112
+
113
+ if self.bias_k is not None:
114
+ xavier_normal_(self.bias_k)
115
+ if self.bias_v is not None:
116
+ xavier_normal_(self.bias_v)
117
+
118
+ self.add_zero_attn = add_zero_attn
119
+
120
+ def _reset_parameters(self):
121
+ if self._qkv_same_embed_dim:
122
+ xavier_uniform_(self.in_proj_weight)
123
+ else:
124
+ xavier_uniform_(self.q_proj_weight)
125
+ xavier_uniform_(self.k_proj_weight)
126
+ xavier_uniform_(self.v_proj_weight)
127
+
128
+ if self.in_proj_bias is not None:
129
+ constant_(self.in_proj_bias, 0.0)
130
+ constant_(self.out_proj.bias, 0.0)
131
+
132
+ if self.bias_k is not None:
133
+ xavier_normal_(self.bias_k)
134
+ if self.bias_v is not None:
135
+ xavier_normal_(self.bias_v)
136
+
137
+ def __setstate__(self, state):
138
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
139
+ if "_qkv_same_embed_dim" not in state:
140
+ state["_qkv_same_embed_dim"] = True
141
+
142
+ super(MultiheadAttention, self).__setstate__(state)
143
+
144
+ def forward(
145
+ self,
146
+ query: Tensor,
147
+ key: Tensor,
148
+ value: Tensor,
149
+ key_padding_mask: Optional[Tensor] = None,
150
+ need_weights: bool = True,
151
+ attn_mask: Optional[Tensor] = None,
152
+ average_attn_weights: bool = True,
153
+ cache=None,
154
+ ) -> Tuple[Tensor, Optional[Tensor]]:
155
+ any_nested = query.is_nested or key.is_nested or value.is_nested
156
+ query = key = value = query.transpose(1, 0)
157
+ attn_output = multi_head_attention_forward_patched(
158
+ query,
159
+ key,
160
+ value,
161
+ self.embed_dim,
162
+ self.num_heads,
163
+ self.in_proj_weight,
164
+ self.in_proj_bias,
165
+ self.bias_k,
166
+ self.bias_v,
167
+ self.add_zero_attn,
168
+ self.dropout,
169
+ self.out_proj.weight,
170
+ self.out_proj.bias,
171
+ training=self.training,
172
+ key_padding_mask=key_padding_mask,
173
+ need_weights=need_weights,
174
+ attn_mask=attn_mask,
175
+ average_attn_weights=average_attn_weights,
176
+ cache=cache,
177
+ )
178
+ return attn_output.transpose(1, 0)
GPT_SoVITS/AR/modules/embedding.py ADDED
@@ -0,0 +1,81 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
2
+ import math
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+
8
+ class TokenEmbedding(nn.Module):
9
+ def __init__(
10
+ self,
11
+ embedding_dim: int,
12
+ vocab_size: int,
13
+ dropout: float = 0.0,
14
+ ):
15
+ super().__init__()
16
+
17
+ self.vocab_size = vocab_size
18
+ self.embedding_dim = embedding_dim
19
+
20
+ self.dropout = torch.nn.Dropout(p=dropout)
21
+ self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)
22
+
23
+ @property
24
+ def weight(self) -> torch.Tensor:
25
+ return self.word_embeddings.weight
26
+
27
+ def embedding(self, index: int) -> torch.Tensor:
28
+ return self.word_embeddings.weight[index : index + 1]
29
+
30
+ def forward(self, x: torch.Tensor):
31
+ x = self.word_embeddings(x)
32
+ x = self.dropout(x)
33
+ return x
34
+
35
+
36
+ class SinePositionalEmbedding(nn.Module):
37
+ def __init__(
38
+ self,
39
+ embedding_dim: int,
40
+ dropout: float = 0.0,
41
+ scale: bool = False,
42
+ alpha: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.embedding_dim = embedding_dim
46
+ self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
47
+ self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
48
+ self.dropout = torch.nn.Dropout(p=dropout)
49
+
50
+ self.reverse = False
51
+ self.pe = None
52
+ self.extend_pe(torch.tensor(0.0).expand(1, 4000))
53
+
54
+ def extend_pe(self, x):
55
+ """Reset the positional encodings."""
56
+ if self.pe is not None:
57
+ if self.pe.size(1) >= x.size(1):
58
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
59
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
60
+ return
61
+ pe = torch.zeros(x.size(1), self.embedding_dim)
62
+ if self.reverse:
63
+ position = torch.arange(
64
+ x.size(1) - 1, -1, -1.0, dtype=torch.float32
65
+ ).unsqueeze(1)
66
+ else:
67
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
68
+ div_term = torch.exp(
69
+ torch.arange(0, self.embedding_dim, 2, dtype=torch.float32)
70
+ * -(math.log(10000.0) / self.embedding_dim)
71
+ )
72
+ pe[:, 0::2] = torch.sin(position * div_term)
73
+ pe[:, 1::2] = torch.cos(position * div_term)
74
+ pe = pe.unsqueeze(0)
75
+ self.pe = pe.to(device=x.device, dtype=x.dtype).detach()
76
+
77
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
78
+ self.extend_pe(x)
79
+ output = x.unsqueeze(-1) if x.ndim == 2 else x
80
+ output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)]
81
+ return self.dropout(output)
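A short shape check of the embedding pipeline above; the vocabulary size, model dimension, and token batch are illustrative assumptions:

import torch

tok = TokenEmbedding(embedding_dim=512, vocab_size=1025, dropout=0.0)
pos = SinePositionalEmbedding(512, dropout=0.0, scale=False, alpha=True)
ids = torch.randint(0, 1025, (2, 50))    # (batch, seq) token ids
x = pos(tok(ids))                        # token embedding + learnable-alpha sinusoidal positions
print(x.shape)                           # torch.Size([2, 50, 512])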
GPT_SoVITS/AR/modules/embedding_onnx.py ADDED
@@ -0,0 +1,63 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
2
+ import math
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+
8
+ class TokenEmbedding(nn.Module):
9
+ def __init__(
10
+ self,
11
+ embedding_dim: int,
12
+ vocab_size: int,
13
+ dropout: float = 0.0,
14
+ ):
15
+ super().__init__()
16
+
17
+ self.vocab_size = vocab_size
18
+ self.embedding_dim = embedding_dim
19
+
20
+ self.dropout = torch.nn.Dropout(p=dropout)
21
+ self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)
22
+
23
+ @property
24
+ def weight(self) -> torch.Tensor:
25
+ return self.word_embeddings.weight
26
+
27
+ def embedding(self, index: int) -> torch.Tensor:
28
+ return self.word_embeddings.weight[index : index + 1]
29
+
30
+ def forward(self, x: torch.Tensor):
31
+ x = self.word_embeddings(x)
32
+ x = self.dropout(x)
33
+ return x
34
+
35
+
36
+ class SinePositionalEmbedding(nn.Module):
37
+ def __init__(
38
+ self,
39
+ embedding_dim: int,
40
+ dropout: float = 0.0,
41
+ scale: bool = False,
42
+ alpha: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.embedding_dim = embedding_dim
46
+ self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
47
+ self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
48
+ self.dropout = torch.nn.Dropout(p=dropout)
49
+ self.reverse = False
50
+ self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim))
51
+
52
+ def extend_pe(self, x):
53
+ position = torch.cumsum(torch.ones_like(x[:,:,0]), dim=1).transpose(0, 1)
54
+ scpe = (position * self.div_term).unsqueeze(0)
55
+ pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0)
56
+ pe = pe.contiguous().view(1, -1, self.embedding_dim)
57
+ return pe
58
+
59
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
60
+ pe = self.extend_pe(x)
61
+ output = x.unsqueeze(-1) if x.ndim == 2 else x
62
+ output = output * self.x_scale + self.alpha * pe
63
+ return self.dropout(output)
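Unlike the precomputed-table version in embedding.py, this ONNX-friendly variant builds the positional table on the fly from the input length (via cumsum), so the exported graph carries no fixed 4000-step buffer and stays length-dynamic. A small shape check with illustrative dimensions:

import torch

pos = SinePositionalEmbedding(512, dropout=0.0, scale=False, alpha=True)
x = torch.randn(1, 123, 512)             # any sequence length works
print(pos(x).shape)                      # torch.Size([1, 123, 512])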
GPT_SoVITS/AR/modules/lr_schedulers.py ADDED
@@ -0,0 +1,83 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import math
4
+
5
+ import torch
6
+ from matplotlib import pyplot as plt
7
+ from torch import nn
8
+ from torch.optim import Adam
9
+
10
+
11
+ class WarmupCosineLRSchedule(torch.optim.lr_scheduler._LRScheduler):
12
+ """
13
+ Implements a warmup learning-rate schedule for one or more optimizers: the LR ramps linearly from 'init_lr' to 'peak_lr' over 'warmup_steps', then decays to 'end_lr' along a cosine curve over the remaining steps up to 'total_steps'.
14
+ """
15
+
16
+ def __init__(
17
+ self,
18
+ optimizer,
19
+ init_lr,
20
+ peak_lr,
21
+ end_lr,
22
+ warmup_steps=10000,
23
+ total_steps=400000,
24
+ current_step=0,
25
+ ):
26
+ self.init_lr = init_lr
27
+ self.peak_lr = peak_lr
28
+ self.end_lr = end_lr
29
+ self.optimizer = optimizer
30
+ self._warmup_rate = (peak_lr - init_lr) / warmup_steps
31
+ self._decay_rate = (end_lr - peak_lr) / (total_steps - warmup_steps)
32
+ self._current_step = current_step
33
+ self.lr = init_lr
34
+ self.warmup_steps = warmup_steps
35
+ self.total_steps = total_steps
36
+ self._last_lr = [self.lr]
37
+
38
+ def set_lr(self, lr):
39
+ self._last_lr = [g["lr"] for g in self.optimizer.param_groups]
40
+ for g in self.optimizer.param_groups:
41
+ # g['lr'] = lr
42
+ g["lr"] = self.end_lr  ### locked: use the fixed end_lr instead of the scheduled value
43
+
44
+ def step(self):
45
+ if self._current_step < self.warmup_steps:
46
+ lr = self.init_lr + self._warmup_rate * self._current_step
47
+
48
+ elif self._current_step > self.total_steps:
49
+ lr = self.end_lr
50
+
51
+ else:
52
+ decay_ratio = (self._current_step - self.warmup_steps) / (
53
+ self.total_steps - self.warmup_steps
54
+ )
55
+ if decay_ratio < 0.0 or decay_ratio > 1.0:
56
+ raise RuntimeError(
57
+ "Decay ratio must be in [0.0, 1.0]. Fix LR scheduler settings."
58
+ )
59
+ coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
60
+ lr = self.end_lr + coeff * (self.peak_lr - self.end_lr)
61
+
62
+ self.lr = lr = self.end_lr = 0.002  ### locked: the schedule misbehaves, so hard-lock the LR to a constant
63
+ self.set_lr(lr)
64
+ self.lr = lr
65
+ self._current_step += 1
66
+ return self.lr
67
+
68
+
69
+ if __name__ == "__main__":
70
+ m = nn.Linear(10, 10)
71
+ opt = Adam(m.parameters(), lr=1e-4)
72
+ s = WarmupCosineLRSchedule(
73
+ opt, 1e-6, 2e-4, 1e-6, warmup_steps=2000, total_steps=20000, current_step=0
74
+ )
75
+ lrs = []
76
+ for i in range(25000):
77
+ s.step()
78
+ lrs.append(s.lr)
79
+ print(s.lr)
80
+
81
+ plt.plot(lrs)
82
+ plt.plot(range(0, 25000), lrs)
83
+ plt.show()
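For reference, the schedule that step() computes before the hard-lock to 0.002 can be restated as a standalone function (names mirror the constructor arguments above):

import math

def warmup_cosine_lr(t, init_lr, peak_lr, end_lr, warmup_steps, total_steps):
    if t < warmup_steps:                                     # linear warmup
        return init_lr + (peak_lr - init_lr) * t / warmup_steps
    if t > total_steps:                                      # schedule finished
        return end_lr
    decay_ratio = (t - warmup_steps) / (total_steps - warmup_steps)
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))    # cosine decay from peak_lr to end_lr
    return end_lr + coeff * (peak_lr - end_lr)

print(warmup_cosine_lr(0, 1e-6, 2e-4, 1e-6, 2000, 20000))      # ~init_lr
print(warmup_cosine_lr(2000, 1e-6, 2e-4, 1e-6, 2000, 20000))   # peak_lr
print(warmup_cosine_lr(20000, 1e-6, 2e-4, 1e-6, 2000, 20000))  # end_lr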
GPT_SoVITS/AR/modules/optim.py ADDED
@@ -0,0 +1,622 @@
1
+ # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
2
+ #
3
+ # See ../LICENSE for clarification regarding multiple authors
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import contextlib
17
+ import logging
18
+ from collections import defaultdict
19
+ from typing import List
20
+ from typing import Tuple
21
+
22
+ import torch
23
+ from torch import Tensor
24
+ from torch.optim import Optimizer
25
+
26
+
27
+ class BatchedOptimizer(Optimizer):
28
+ """
29
+ This class adds to class Optimizer the capability to optimize parameters in batches:
30
+ it will stack the parameters and their grads for you so the optimizer can work
31
+ on tensors with an extra leading dimension. This is intended for speed with GPUs,
32
+ as it reduces the number of kernels launched in the optimizer.
33
+
34
+ Args:
35
+ params:
36
+ """
37
+
38
+ def __init__(self, params, defaults):
39
+ super(BatchedOptimizer, self).__init__(params, defaults)
40
+
41
+ @contextlib.contextmanager
42
+ def batched_params(self, param_group, group_params_names):
43
+ """
44
+ This function returns (technically, yields) a list of
45
+ of tuples (p, state), where
46
+ p is a `fake` parameter that is stacked (over axis 0) from real parameters
47
+ that share the same shape, and its gradient is also stacked;
48
+ `state` is the state corresponding to this batch of parameters
49
+ (it will be physically located in the "state" for one of the real
50
+ parameters, the last one that has any particular shape and dtype).
51
+
52
+ This function is decorated as a context manager so that it can
53
+ write parameters back to their "real" locations.
54
+
55
+ The idea is, instead of doing:
56
+ <code>
57
+ for p in group["params"]:
58
+ state = self.state[p]
59
+ ...
60
+ </code>
61
+ you can do:
62
+ <code>
63
+ with self.batched_params(group["params"]) as batches:
64
+ for p, state, p_names in batches:
65
+ ...
66
+ </code>
67
+
68
+ Args:
69
+ group: a parameter group, which is a list of parameters; should be
70
+ one of self.param_groups.
71
+ group_params_names: name for each parameter in group,
72
+ which is List[str].
73
+ """
74
+ batches = defaultdict(
75
+ list
76
+ ) # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
77
+ batches_names = defaultdict(
78
+ list
79
+ ) # `batches` maps from tuple (dtype_as_str,*shape) to list of str
80
+
81
+ assert len(param_group) == len(group_params_names)
82
+ for p, named_p in zip(param_group, group_params_names):
83
+ key = (str(p.dtype), *p.shape)
84
+ batches[key].append(p)
85
+ batches_names[key].append(named_p)
86
+
87
+ batches_names_keys = list(batches_names.keys())
88
+ sorted_idx = sorted(
89
+ range(len(batches_names)), key=lambda i: batches_names_keys[i])
90
+ batches_names = [
91
+ batches_names[batches_names_keys[idx]] for idx in sorted_idx
92
+ ]
93
+ batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]
94
+
95
+ stacked_params_dict = dict()
96
+
97
+ # turn batches into a list, in deterministic order.
98
+ # tuples will contain tuples of (stacked_param, state, stacked_params_names),
99
+ # one for each batch in `batches`.
100
+ tuples = []
101
+
102
+ for batch, batch_names in zip(batches, batches_names):
103
+ p = batch[0]
104
+ # we arbitrarily store the state in the
105
+ # state corresponding to the 1st parameter in the
106
+ # group. class Optimizer will take care of saving/loading state.
107
+ state = self.state[p]
108
+ p_stacked = torch.stack(batch)
109
+ grad = torch.stack([
110
+ torch.zeros_like(p) if p.grad is None else p.grad for p in batch
111
+ ])
112
+ p_stacked.grad = grad
113
+ stacked_params_dict[key] = p_stacked
114
+ tuples.append((p_stacked, state, batch_names))
115
+
116
+ yield tuples # <-- calling code will do the actual optimization here!
117
+
118
+ for ((stacked_params, _state, _names), batch) in zip(tuples, batches):
119
+ for i, p in enumerate(batch): # batch is list of Parameter
120
+ p.copy_(stacked_params[i])
121
+
122
+
123
+ class ScaledAdam(BatchedOptimizer):
124
+ """
125
+ Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
126
+ proportional to the norm of that parameter; and also learn the scale of the parameter,
127
+ in log space, subject to upper and lower limits (as if we had factored each parameter as
128
+ param = underlying_param * log_scale.exp())
129
+
130
+
131
+ Args:
132
+ params: The parameters or param_groups to optimize (like other Optimizer subclasses)
133
+ lr: The learning rate. We will typically use a learning rate schedule that starts
134
+ at 0.03 and decreases over time, i.e. much higher than other common
135
+ optimizers.
136
+ clipping_scale: (e.g. 2.0)
137
+ A scale for gradient-clipping: if specified, the normalized gradients
138
+ over the whole model will be clipped to have 2-norm equal to
139
+ `clipping_scale` times the median 2-norm over the most recent period
140
+ of `clipping_update_period` minibatches. By "normalized gradients",
141
+ we mean after multiplying by the rms parameter value for this tensor
142
+ [for non-scalars]; this is appropriate because our update is scaled
143
+ by this quantity.
144
+ betas: beta1,beta2 are momentum constants for regular momentum, and moving sum-sq grad.
145
+ Must satisfy 0 < beta <= beta2 < 1.
146
+ scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
147
+ scale of each parameter tensor and scalar parameters of the mode..
148
+ If each parameter were decomposed
149
+ as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
150
+ would be a the scaling factor on the learning rate of p_scale.
151
+ eps: A general-purpose epsilon to prevent division by zero
152
+ param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
153
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
154
+ parameter tensor to be >= this value)
155
+ param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
156
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
157
+ parameter tensor to be <= this value)
158
+ scalar_max: Maximum absolute value for scalar parameters (applicable if your
159
+ model has any parameters with numel() == 1).
160
+ size_update_period: The periodicity, in steps, with which we update the size (scale)
161
+ of the parameter tensor. This is provided to save a little time
162
+ in the update.
163
+ clipping_update_period: if clipping_scale is specified, this is the period (in minibatches) over which gradient-norm statistics are collected before the clipping threshold is re-estimated.
164
+ """
165
+
166
+ def __init__(
167
+ self,
168
+ params,
169
+ lr=3e-02,
170
+ clipping_scale=None,
171
+ betas=(0.9, 0.98),
172
+ scalar_lr_scale=0.1,
173
+ eps=1.0e-08,
174
+ param_min_rms=1.0e-05,
175
+ param_max_rms=3.0,
176
+ scalar_max=10.0,
177
+ size_update_period=4,
178
+ clipping_update_period=100,
179
+ parameters_names=None,
180
+ show_dominant_parameters=True, ):
181
+
182
+ assert parameters_names is not None, (
183
+ "Please prepare parameters_names,"
184
+ "which is a List[List[str]]. Each List[str] is for a group"
185
+ "and each str is for a parameter")
186
+ defaults = dict(
187
+ lr=lr,
188
+ clipping_scale=clipping_scale,
189
+ betas=betas,
190
+ scalar_lr_scale=scalar_lr_scale,
191
+ eps=eps,
192
+ param_min_rms=param_min_rms,
193
+ param_max_rms=param_max_rms,
194
+ scalar_max=scalar_max,
195
+ size_update_period=size_update_period,
196
+ clipping_update_period=clipping_update_period, )
197
+
198
+ super(ScaledAdam, self).__init__(params, defaults)
199
+ assert len(self.param_groups) == len(parameters_names)
200
+ self.parameters_names = parameters_names
201
+ self.show_dominant_parameters = show_dominant_parameters
202
+
203
+ def __setstate__(self, state):
204
+ super(ScaledAdam, self).__setstate__(state)
205
+
206
+ @torch.no_grad()
207
+ def step(self, closure=None):
208
+ """Performs a single optimization step.
209
+
210
+ Arguments:
211
+ closure (callable, optional): A closure that reevaluates the model
212
+ and returns the loss.
213
+ """
214
+ loss = None
215
+ if closure is not None:
216
+ with torch.enable_grad():
217
+ loss = closure()
218
+
219
+ batch = True
220
+
221
+ for group, group_params_names in zip(self.param_groups,
222
+ self.parameters_names):
223
+
224
+ with self.batched_params(group["params"],
225
+ group_params_names) as batches:
226
+
227
+ # batches is list of pairs (stacked_param, state). stacked_param is like
228
+ # a regular parameter, and will have a .grad, but the 1st dim corresponds to
229
+ # a stacking dim, it is not a real dim.
230
+
231
+ if (len(batches[0][1]) ==
232
+ 0): # if len(first state) == 0: not yet initialized
233
+ clipping_scale = 1
234
+ else:
235
+ clipping_scale = self._get_clipping_scale(group, batches)
236
+
237
+ for p, state, _ in batches:
238
+ # Perform optimization step.
239
+ # grad is not going to be None, we handled that when creating the batches.
240
+ grad = p.grad
241
+ if grad.is_sparse:
242
+ raise RuntimeError(
243
+ "ScaledAdam optimizer does not support sparse gradients"
244
+ )
245
+ # State initialization
246
+ if len(state) == 0:
247
+ self._init_state(group, p, state)
248
+
249
+ self._step_one_batch(group, p, state, clipping_scale)
250
+
251
+ return loss
252
+
253
+ def _init_state(self, group: dict, p: Tensor, state: dict):
254
+ """
255
+ Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
256
+ is actually the batch dimension, corresponding to batched-together
257
+ parameters of a given shape.
258
+
259
+
260
+ Args:
261
+ group: Dict to look up configuration values.
262
+ p: The parameter that we are initializing the state for
263
+ state: Dict from string to whatever state we are initializing
264
+ """
265
+ size_update_period = group["size_update_period"]
266
+
267
+ state["step"] = 0
268
+
269
+ kwargs = {"device": p.device, "dtype": p.dtype}
270
+
271
+ # 'delta' implements conventional momentum. There are
272
+ # several different kinds of update going on, so rather than
273
+ # compute "exp_avg" like in Adam, we store and decay a
274
+ # parameter-change "delta", which combines all forms of
275
+ # update. this is equivalent to how it's done in Adam,
276
+ # except for the first few steps.
277
+ state["delta"] = torch.zeros_like(
278
+ p, memory_format=torch.preserve_format)
279
+
280
+ batch_size = p.shape[0]
281
+ numel = p.numel() // batch_size
282
+ numel = p.numel()
283
+
284
+ if numel > 1:
285
+ # "param_rms" just periodically records the scalar root-mean-square value of
286
+ # the parameter tensor.
287
+ # it has a shape like (batch_size, 1, 1, 1, 1)
288
+ param_rms = (
289
+ (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt())
290
+ state["param_rms"] = param_rms
291
+
292
+ state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
293
+ state["scale_grads"] = torch.zeros(size_update_period,
294
+ *param_rms.shape, **kwargs)
295
+
296
+ # exp_avg_sq is the weighted sum of scaled gradients. as in Adam.
297
+ state["exp_avg_sq"] = torch.zeros_like(
298
+ p, memory_format=torch.preserve_format)
299
+
300
+ def _get_clipping_scale(self,
301
+ group: dict,
302
+ tuples: List[Tuple[Tensor, dict, List[str]]]
303
+ ) -> float:
304
+ """
305
+ Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
306
+ by this amount before applying the rest of the update.
307
+
308
+ Args:
309
+ group: the parameter group, an item in self.param_groups
310
+ tuples: a list of tuples of (param, state, param_names)
311
+ where param is a batched set of parameters,
312
+ with a .grad (1st dim is batch dim)
313
+ and state is the state-dict where optimization parameters are kept.
314
+ param_names is a List[str] where each str is the name of a parameter
315
+ in batched set of parameters "param".
316
+ """
317
+ assert len(tuples) >= 1
318
+ clipping_scale = group["clipping_scale"]
319
+ (first_p, first_state, _) = tuples[0]
320
+ step = first_state["step"]
321
+ if clipping_scale is None or step == 0:
322
+ # no clipping. return early on step == 0 because the other
323
+ # parameters' state won't have been initialized yet.
324
+ return 1.0
325
+ clipping_update_period = group["clipping_update_period"]
326
+
327
+ tot_sumsq = torch.tensor(0.0, device=first_p.device)
328
+ for (p, state, param_names) in tuples:
329
+ grad = p.grad
330
+ if grad.is_sparse:
331
+ raise RuntimeError(
332
+ "ScaledAdam optimizer does not support sparse gradients")
333
+ if p.numel() == p.shape[0]: # a batch of scalars
334
+ tot_sumsq += (grad**2).sum() # sum() to change shape [1] to []
335
+ else:
336
+ tot_sumsq += ((grad * state["param_rms"])**2).sum()
337
+
338
+ tot_norm = tot_sumsq.sqrt()
339
+ if "model_norms" not in first_state:
340
+ first_state["model_norms"] = torch.zeros(
341
+ clipping_update_period, device=p.device)
342
+ first_state["model_norms"][step % clipping_update_period] = tot_norm
343
+
344
+ if step % clipping_update_period == 0:
345
+ # Print some stats.
346
+ # We don't reach here if step == 0 because we would have returned
347
+ # above.
348
+ sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
349
+ quartiles = []
350
+ for n in range(0, 5):
351
+ index = min(
352
+ clipping_update_period - 1,
353
+ (clipping_update_period // 4) * n, )
354
+ quartiles.append(sorted_norms[index].item())
355
+
356
+ median = quartiles[2]
357
+ threshold = clipping_scale * median
358
+ first_state["model_norm_threshold"] = threshold
359
+ percent_clipped = (first_state["num_clipped"] * 100.0 /
360
+ clipping_update_period
361
+ if "num_clipped" in first_state else 0.0)
362
+ first_state["num_clipped"] = 0
363
+ quartiles = " ".join(["%.3e" % x for x in quartiles])
364
+ logging.info(
365
+ f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
366
+ f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
367
+ )
368
+
369
+ if step < clipping_update_period:
370
+ return 1.0 # We have not yet estimated a norm to clip to.
371
+ else:
372
+ try:
373
+ model_norm_threshold = first_state["model_norm_threshold"]
374
+ except KeyError:
375
+ logging.info(
376
+ "Warning: model_norm_threshold not in state: possibly "
377
+ "you changed config when restarting, adding clipping_scale option?"
378
+ )
379
+ return 1.0
380
+ ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
381
+ if ans < 1.0:
382
+ first_state["num_clipped"] += 1
383
+ if ans < 0.1:
384
+ logging.warn(
385
+ f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}"
386
+ )
387
+ if self.show_dominant_parameters:
388
+ assert p.shape[0] == len(param_names)
389
+ self._show_gradient_dominating_parameter(tuples, tot_sumsq)
390
+ return ans
391
+
392
+ def _show_gradient_dominating_parameter(
393
+ self, tuples: List[Tuple[Tensor, dict, List[str]]],
394
+ tot_sumsq: Tensor):
395
+ """
396
+ Show information about the parameter that dominates tot_sumsq.
397
+
398
+ Args:
399
+ tuples: a list of tuples of (param, state, param_names)
400
+ where param is a batched set of parameters,
401
+ with a .grad (1st dim is batch dim)
402
+ and state is the state-dict where optimization parameters are kept.
403
+ param_names is a List[str] where each str is the name of a parameter
404
+ in batched set of parameters "param".
405
+ tot_sumsq: sumsq of all parameters. Though it could be calculated
406
+ from tuples, we still pass it to save some time.
407
+ """
408
+ all_sumsq_orig = {}
409
+ for (p, state, batch_param_names) in tuples:
410
+ # p is a stacked batch parameters.
411
+ batch_grad = p.grad
412
+ if p.numel() == p.shape[0]: # a batch of scalars
413
+ batch_sumsq_orig = batch_grad**2
414
+ # Dummy values used by the following `zip` statement.
415
+ batch_rms_orig = torch.ones(p.shape[0])
416
+ else:
417
+ batch_rms_orig = state["param_rms"]
418
+ batch_sumsq_orig = ((batch_grad * batch_rms_orig)**2).sum(
419
+ dim=list(range(1, batch_grad.ndim)))
420
+
421
+ for name, sumsq_orig, rms, grad in zip(batch_param_names,
422
+ batch_sumsq_orig,
423
+ batch_rms_orig, batch_grad):
424
+
425
+ proportion_orig = sumsq_orig / tot_sumsq
426
+ all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)
427
+
428
+ assert torch.isclose(
429
+ sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
430
+ torch.tensor(1.0), )
431
+ sorted_by_proportion = {
432
+ k: v
433
+ for k, v in sorted(
434
+ all_sumsq_orig.items(),
435
+ key=lambda item: item[1][0],
436
+ reverse=True, )
437
+ }
438
+ dominant_param_name = next(iter(sorted_by_proportion))
439
+ (dominant_proportion, dominant_sumsq, dominant_rms,
440
+ dominant_grad, ) = sorted_by_proportion[dominant_param_name]
441
+ logging.info(f"Parameter dominating tot_sumsq {dominant_param_name}"
442
+ f" with proportion {dominant_proportion:.2f},"
443
+ f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
444
+ f"={dominant_sumsq:.3e},"
445
+ f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
446
+ f" orig_rms_sq={(dominant_rms**2).item():.3e}")
447
+
448
+ def _step_one_batch(self,
449
+ group: dict,
450
+ p: Tensor,
451
+ state: dict,
452
+ clipping_scale: float):
453
+ """
454
+ Do the step for one parameter, which is actually going to be a batch of
455
+ `real` parameters, with dim 0 as the batch dim.
456
+ Args:
457
+ group: dict to look up configuration values
458
+ p: parameter to update (actually multiple parameters stacked together
459
+ as a batch)
460
+ state: state-dict for p, to look up the optimizer state
461
+ """
462
+ lr = group["lr"]
463
+ size_update_period = group["size_update_period"]
464
+ beta1 = group["betas"][0]
465
+
466
+ grad = p.grad
467
+ if clipping_scale != 1.0:
468
+ grad = grad * clipping_scale
469
+ step = state["step"]
470
+ delta = state["delta"]
471
+
472
+ delta.mul_(beta1)
473
+ batch_size = p.shape[0]
474
+ numel = p.numel() // batch_size
475
+ if numel > 1:
476
+ # Update the size/scale of p, and set param_rms
477
+ scale_grads = state["scale_grads"]
478
+ scale_grads[step % size_update_period] = (p * grad).sum(
479
+ dim=list(range(1, p.ndim)), keepdim=True)
480
+ if step % size_update_period == size_update_period - 1:
481
+ param_rms = state["param_rms"] # shape: (batch_size, 1, 1, ..)
482
+ param_rms.copy_((p**2)
483
+ .mean(dim=list(range(1, p.ndim)), keepdim=True)
484
+ .sqrt())
485
+ if step > 0:
486
+ # self._size_update() learns the overall scale on the
487
+ # parameter, by shrinking or expanding it.
488
+ self._size_update(group, scale_grads, p, state)
489
+
490
+ if numel == 1:
491
+ # For parameters with 1 element we just use regular Adam.
492
+ # Updates delta.
493
+ self._step_scalar(group, p, state)
494
+ else:
495
+ self._step(group, p, state)
496
+
497
+ state["step"] = step + 1
498
+
499
+ def _size_update(self,
500
+ group: dict,
501
+ scale_grads: Tensor,
502
+ p: Tensor,
503
+ state: dict) -> None:
504
+ """
505
+ Called only where p.numel() > 1, this updates the scale of the parameter.
506
+ If we imagine: p = underlying_param * scale.exp(), and we are doing
507
+ gradient descent on underlying param and on scale, this function does the update
508
+ on `scale`.
509
+
510
+ Args:
511
+ group: dict to look up configuration values
512
+ scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
513
+ grads w.r.t. the scales.
514
+ p: The parameter to update
515
+ state: The state-dict of p
516
+ """
517
+
518
+ param_rms = state["param_rms"]
519
+ beta1, beta2 = group["betas"]
520
+ size_lr = group["lr"] * group["scalar_lr_scale"]
521
+ param_min_rms = group["param_min_rms"]
522
+ param_max_rms = group["param_max_rms"]
523
+ eps = group["eps"]
524
+ step = state["step"]
525
+ batch_size = p.shape[0]
526
+
527
+ size_update_period = scale_grads.shape[0]
528
+ # correct beta2 for the size update period: we will have
529
+ # faster decay at this level.
530
+ beta2_corr = beta2**size_update_period
531
+
532
+ scale_exp_avg_sq = state[
533
+ "scale_exp_avg_sq"] # shape: (batch_size, 1, 1, ..)
534
+ scale_exp_avg_sq.mul_(beta2_corr).add_(
535
+ (scale_grads**2).mean(dim=0), # mean over dim `size_update_period`
536
+ alpha=1 - beta2_corr, ) # shape is (batch_size, 1, 1, ...)
537
+
538
+ # The 1st time we reach here is when size_step == 1.
539
+ size_step = (step + 1) // size_update_period
540
+ bias_correction2 = 1 - beta2_corr**size_step
541
+ # we don't bother with bias_correction1; this will help prevent divergence
542
+ # at the start of training.
543
+
544
+ denom = scale_exp_avg_sq.sqrt() + eps
545
+
546
+ scale_step = (-size_lr * (bias_correction2**0.5) *
547
+ scale_grads.sum(dim=0) / denom)
548
+
549
+ is_too_small = param_rms < param_min_rms
550
+ is_too_large = param_rms > param_max_rms
551
+
552
+ # when the param gets too small, just don't shrink it any further.
553
+ scale_step.masked_fill_(is_too_small, 0.0)
554
+ # when it gets too large, stop it from getting any larger.
555
+ scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
556
+ delta = state["delta"]
557
+ # the factor of (1-beta1) relates to momentum.
558
+ delta.add_(p * scale_step, alpha=(1 - beta1))
559
+
560
+ def _step(self, group: dict, p: Tensor, state: dict):
561
+ """
562
+ This function does the core update of self.step(), in the case where the members of
563
+ the batch have more than 1 element.
564
+
565
+ Args:
566
+ group: A dict which will be used to look up configuration values
567
+ p: The parameter to be updated
568
+ grad: The grad of p
569
+ state: The state-dict corresponding to parameter p
570
+
571
+ This function modifies p.
572
+ """
573
+ grad = p.grad
574
+ lr = group["lr"]
575
+ beta1, beta2 = group["betas"]
576
+ eps = group["eps"]
577
+ param_min_rms = group["param_min_rms"]
578
+ step = state["step"]
579
+
580
+ exp_avg_sq = state["exp_avg_sq"]
581
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
582
+
583
+ this_step = state["step"] - (state["zero_step"]
584
+ if "zero_step" in state else 0)
585
+ bias_correction2 = 1 - beta2**(this_step + 1)
586
+ if bias_correction2 < 0.99:
587
+ # note: not in-place.
588
+ exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)
589
+
590
+ denom = exp_avg_sq.sqrt()
591
+ denom += eps
592
+ grad = grad / denom
593
+
594
+ alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)
595
+
596
+ delta = state["delta"]
597
+ delta.add_(grad * alpha)
598
+ p.add_(delta)
599
+
600
+ def _step_scalar(self, group: dict, p: Tensor, state: dict):
601
+ """
602
+ A simplified form of the core update for scalar tensors, where we cannot get a good
603
+ estimate of the parameter rms.
604
+ """
605
+ beta1, beta2 = group["betas"]
606
+ scalar_max = group["scalar_max"]
607
+ eps = group["eps"]
608
+ lr = group["lr"] * group["scalar_lr_scale"]
609
+ grad = p.grad
610
+
611
+ exp_avg_sq = state["exp_avg_sq"] # shape: (batch_size,)
612
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
613
+
614
+ # bias_correction2 is like in Adam. Don't bother with bias_correction1;
615
+ # slower update at the start will help stability anyway.
616
+ bias_correction2 = 1 - beta2**(state["step"] + 1)
617
+ denom = (exp_avg_sq / bias_correction2).sqrt() + eps
618
+
619
+ delta = state["delta"]
620
+ delta.add_(grad / denom, alpha=-lr * (1 - beta1))
621
+ p.clamp_(min=-scalar_max, max=scalar_max)
622
+ p.add_(delta)
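For orientation, here is a minimal usage sketch of the ScaledAdam optimizer defined above. It is not part of the committed file: the toy model, tensor sizes, and the import path are placeholders, and only keyword arguments documented in the docstring are used.

    # Hypothetical usage sketch of ScaledAdam (toy model and sizes are placeholders).
    import torch
    import torch.nn as nn
    from AR.modules.optim import ScaledAdam   # assumed import path within this repo

    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    # ScaledAdam requires one List[str] of parameter names per param group.
    parameters_names = [[name for name, _ in model.named_parameters()]]
    optimizer = ScaledAdam(
        model.parameters(),
        lr=0.03,                    # the docstring suggests a schedule starting near 0.03
        clipping_scale=2.0,         # enables median-based gradient clipping
        parameters_names=parameters_names,
    )

    x, target = torch.randn(8, 16), torch.randn(8, 4)
    loss = nn.functional.mse_loss(model(x), target)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()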
GPT_SoVITS/AR/modules/patched_mha_with_cache.py ADDED
@@ -0,0 +1,465 @@
1
+ from torch.nn.functional import *
2
+ from torch.nn.functional import (
3
+ _mha_shape_check,
4
+ _canonical_mask,
5
+ _none_or_dtype,
6
+ _in_projection_packed,
7
+ )
8
+ from torch.nn import functional as F
9
+ import torch
10
+ # Tensor = torch.Tensor
11
+ # from typing import Callable, List, Optional, Tuple, Union
12
+
13
+
14
+ def multi_head_attention_forward_patched(
15
+ query: Tensor,
16
+ key: Tensor,
17
+ value: Tensor,
18
+ embed_dim_to_check: int,
19
+ num_heads: int,
20
+ in_proj_weight: Optional[Tensor],
21
+ in_proj_bias: Optional[Tensor],
22
+ bias_k: Optional[Tensor],
23
+ bias_v: Optional[Tensor],
24
+ add_zero_attn: bool,
25
+ dropout_p: float,
26
+ out_proj_weight: Tensor,
27
+ out_proj_bias: Optional[Tensor],
28
+ training: bool = True,
29
+ key_padding_mask: Optional[Tensor] = None,
30
+ need_weights: bool = True,
31
+ attn_mask: Optional[Tensor] = None,
32
+ use_separate_proj_weight: bool = False,
33
+ q_proj_weight: Optional[Tensor] = None,
34
+ k_proj_weight: Optional[Tensor] = None,
35
+ v_proj_weight: Optional[Tensor] = None,
36
+ static_k: Optional[Tensor] = None,
37
+ static_v: Optional[Tensor] = None,
38
+ average_attn_weights: bool = True,
39
+ is_causal: bool = False,
40
+ cache=None,
41
+ ) -> Tuple[Tensor, Optional[Tensor]]:
42
+ r"""
43
+ Args:
44
+ query, key, value: map a query and a set of key-value pairs to an output.
45
+ See "Attention Is All You Need" for more details.
46
+ embed_dim_to_check: total dimension of the model.
47
+ num_heads: parallel attention heads.
48
+ in_proj_weight, in_proj_bias: input projection weight and bias.
49
+ bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
50
+ add_zero_attn: add a new batch of zeros to the key and
51
+ value sequences at dim=1.
52
+ dropout_p: probability of an element to be zeroed.
53
+ out_proj_weight, out_proj_bias: the output projection weight and bias.
54
+ training: apply dropout if is ``True``.
55
+ key_padding_mask: if provided, specified padding elements in the key will
56
+ be ignored by the attention. This is a binary mask. When the value is True,
57
+ the corresponding value on the attention layer will be filled with -inf.
58
+ need_weights: output attn_output_weights.
59
+ Default: `True`
60
+ Note: `need_weights` defaults to `True`, but should be set to `False`
61
+ for best performance when attention weights are not needed.
62
+ *Setting need_weights to `True`
63
+ leads to a significant performance degradation.*
64
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
65
+ the batches while a 3D mask allows to specify a different mask for the entries of each batch.
66
+ is_causal: If specified, applies a causal mask as attention mask, and ignores
67
+ attn_mask for computing scaled dot product attention.
68
+ Default: ``False``.
69
+ .. warning::
70
+ is_causal provides a hint that the attn_mask is the
71
+ causal mask. Providing incorrect hints can result in
72
+ incorrect execution, including forward and backward
73
+ compatibility.
74
+ use_separate_proj_weight: the function accepts the proj. weights for query, key,
75
+ and value in different forms. If false, in_proj_weight will be used, which is
76
+ a combination of q_proj_weight, k_proj_weight, v_proj_weight.
77
+ q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
78
+ static_k, static_v: static key and value used for attention operators.
79
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
80
+ Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
81
+ when ``need_weights=True.``. Default: True
82
+
83
+
84
+ Shape:
85
+ Inputs:
86
+ - query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
87
+ the embedding dimension.
88
+ - key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
89
+ the embedding dimension.
90
+ - value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
91
+ the embedding dimension.
92
+ - key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
93
+ If a FloatTensor is provided, it will be directly added to the value.
94
+ If a BoolTensor is provided, the positions with the
95
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
96
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
97
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
98
+ S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
99
+ positions. If a BoolTensor is provided, positions with ``True``
100
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
101
+ is provided, it will be added to the attention weight.
102
+ - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
103
+ N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
104
+ - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
105
+ N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
106
+
107
+ Outputs:
108
+ - attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
109
+ E is the embedding dimension.
110
+ - attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
111
+ attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
112
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
113
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
114
+ head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
115
+ """
116
+ tens_ops = (
117
+ query,
118
+ key,
119
+ value,
120
+ in_proj_weight,
121
+ in_proj_bias,
122
+ bias_k,
123
+ bias_v,
124
+ out_proj_weight,
125
+ out_proj_bias,
126
+ )
127
+ if has_torch_function(tens_ops):
128
+ return handle_torch_function(
129
+ multi_head_attention_forward,
130
+ tens_ops,
131
+ query,
132
+ key,
133
+ value,
134
+ embed_dim_to_check,
135
+ num_heads,
136
+ in_proj_weight,
137
+ in_proj_bias,
138
+ bias_k,
139
+ bias_v,
140
+ add_zero_attn,
141
+ dropout_p,
142
+ out_proj_weight,
143
+ out_proj_bias,
144
+ training=training,
145
+ key_padding_mask=key_padding_mask,
146
+ need_weights=need_weights,
147
+ attn_mask=attn_mask,
148
+ is_causal=is_causal,
149
+ use_separate_proj_weight=use_separate_proj_weight,
150
+ q_proj_weight=q_proj_weight,
151
+ k_proj_weight=k_proj_weight,
152
+ v_proj_weight=v_proj_weight,
153
+ static_k=static_k,
154
+ static_v=static_v,
155
+ average_attn_weights=average_attn_weights,
156
+ cache=cache,
157
+ )
158
+
159
+ is_batched = _mha_shape_check(
160
+ query, key, value, key_padding_mask, attn_mask, num_heads
161
+ )
162
+
163
+ # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
164
+ # is batched, run the computation and before returning squeeze the
165
+ # batch dimension so that the output doesn't carry this temporary batch dimension.
166
+ if not is_batched:
167
+ # unsqueeze if the input is unbatched
168
+ query = query.unsqueeze(1)
169
+ key = key.unsqueeze(1)
170
+ value = value.unsqueeze(1)
171
+ if key_padding_mask is not None:
172
+ key_padding_mask = key_padding_mask.unsqueeze(0)
173
+
174
+ # set up shape vars
175
+ tgt_len, bsz, embed_dim = query.shape
176
+ src_len, _, _ = key.shape
177
+
178
+ key_padding_mask = _canonical_mask(
179
+ mask=key_padding_mask,
180
+ mask_name="key_padding_mask",
181
+ other_type=_none_or_dtype(attn_mask),
182
+ other_name="attn_mask",
183
+ target_type=query.dtype,
184
+ )
185
+
186
+ if is_causal and attn_mask is None:
187
+ raise RuntimeError(
188
+ "Need attn_mask if specifying the is_causal hint. "
189
+ "You may use the Transformer module method "
190
+ "`generate_square_subsequent_mask` to create this mask."
191
+ )
192
+
193
+ if is_causal and key_padding_mask is None and not need_weights:
194
+ # when we have a kpm or need weights, we need attn_mask
195
+ # Otherwise, we use the is_causal hint go as is_causal
196
+ # indicator to SDPA.
197
+ attn_mask = None
198
+ else:
199
+ attn_mask = _canonical_mask(
200
+ mask=attn_mask,
201
+ mask_name="attn_mask",
202
+ other_type=None,
203
+ other_name="",
204
+ target_type=query.dtype,
205
+ check_other=False,
206
+ )
207
+
208
+ if key_padding_mask is not None:
209
+ # We have the attn_mask, and use that to merge kpm into it.
210
+ # Turn off use of is_causal hint, as the merged mask is no
211
+ # longer causal.
212
+ is_causal = False
213
+
214
+ assert (
215
+ embed_dim == embed_dim_to_check
216
+ ), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
217
+ if isinstance(embed_dim, torch.Tensor):
218
+ # embed_dim can be a tensor when JIT tracing
219
+ head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
220
+ else:
221
+ head_dim = embed_dim // num_heads
222
+ assert (
223
+ head_dim * num_heads == embed_dim
224
+ ), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
225
+ if use_separate_proj_weight:
226
+ # allow MHA to have different embedding dimensions when separate projection weights are used
227
+ assert (
228
+ key.shape[:2] == value.shape[:2]
229
+ ), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
230
+ else:
231
+ assert (
232
+ key.shape == value.shape
233
+ ), f"key shape {key.shape} does not match value shape {value.shape}"
234
+
235
+ #
236
+ # compute in-projection
237
+ #
238
+ if not use_separate_proj_weight:
239
+ assert (
240
+ in_proj_weight is not None
241
+ ), "use_separate_proj_weight is False but in_proj_weight is None"
242
+ q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
243
+ else:
244
+ assert (
245
+ q_proj_weight is not None
246
+ ), "use_separate_proj_weight is True but q_proj_weight is None"
247
+ assert (
248
+ k_proj_weight is not None
249
+ ), "use_separate_proj_weight is True but k_proj_weight is None"
250
+ assert (
251
+ v_proj_weight is not None
252
+ ), "use_separate_proj_weight is True but v_proj_weight is None"
253
+ if in_proj_bias is None:
254
+ b_q = b_k = b_v = None
255
+ else:
256
+ b_q, b_k, b_v = in_proj_bias.chunk(3)
257
+ q, k, v = _in_projection(
258
+ query,
259
+ key,
260
+ value,
261
+ q_proj_weight,
262
+ k_proj_weight,
263
+ v_proj_weight,
264
+ b_q,
265
+ b_k,
266
+ b_v,
267
+ )
268
+ if cache is not None:
269
+ if cache["first_infer"] == 1:
270
+ cache["k"][cache["stage"]] = k
271
+ # print(0,cache["k"].shape)
272
+ cache["v"][cache["stage"]] = v
273
+ else:  # each of the 12 layers keeps its own cached k/v
274
+ # print(1,cache["k"].shape)
275
+ cache["k"][cache["stage"]] = torch.cat(
276
+ [cache["k"][cache["stage"]], k], 0
277
+ )  # the time axis was originally dim 1, but the projection may have transposed it, so time ends up on dim 0
278
+ cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]], v], 0)
279
+ # print(2, cache["k"].shape)
280
+ src_len = cache["k"][cache["stage"]].shape[0]
281
+ k = cache["k"][cache["stage"]]
282
+ v = cache["v"][cache["stage"]]
283
+ # if attn_mask is not None:
284
+ # attn_mask=attn_mask[-1:,]
285
+ # print(attn_mask.shape,attn_mask)
286
+ cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]
287
+ # print(2333,cache)
288
+ # prep attention mask
289
+
290
+ attn_mask = _canonical_mask(
291
+ mask=attn_mask,
292
+ mask_name="attn_mask",
293
+ other_type=None,
294
+ other_name="",
295
+ target_type=q.dtype,
296
+ check_other=False,
297
+ )
298
+
299
+ if attn_mask is not None:
300
+ # ensure attn_mask's dim is 3
301
+ if attn_mask.dim() == 2:
302
+ correct_2d_size = (tgt_len, src_len)
303
+ if attn_mask.shape != correct_2d_size:
304
+ raise RuntimeError(
305
+ f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
306
+ )
307
+ attn_mask = attn_mask.unsqueeze(0)
308
+ elif attn_mask.dim() == 3:
309
+ correct_3d_size = (bsz * num_heads, tgt_len, src_len)
310
+ if attn_mask.shape != correct_3d_size:
311
+ raise RuntimeError(
312
+ f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
313
+ )
314
+ else:
315
+ raise RuntimeError(
316
+ f"attn_mask's dimension {attn_mask.dim()} is not supported"
317
+ )
318
+
319
+ # add bias along batch dimension (currently second)
320
+ if bias_k is not None and bias_v is not None:
321
+ assert static_k is None, "bias cannot be added to static key."
322
+ assert static_v is None, "bias cannot be added to static value."
323
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
324
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
325
+ if attn_mask is not None:
326
+ attn_mask = pad(attn_mask, (0, 1))
327
+ if key_padding_mask is not None:
328
+ key_padding_mask = pad(key_padding_mask, (0, 1))
329
+ else:
330
+ assert bias_k is None
331
+ assert bias_v is None
332
+
333
+ #
334
+ # reshape q, k, v for multihead attention and make em batch first
335
+ #
336
+ q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
337
+ if static_k is None:
338
+ k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
339
+ else:
340
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
341
+ assert (
342
+ static_k.size(0) == bsz * num_heads
343
+ ), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
344
+ assert (
345
+ static_k.size(2) == head_dim
346
+ ), f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
347
+ k = static_k
348
+ if static_v is None:
349
+ v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
350
+ else:
351
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
352
+ assert (
353
+ static_v.size(0) == bsz * num_heads
354
+ ), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
355
+ assert (
356
+ static_v.size(2) == head_dim
357
+ ), f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
358
+ v = static_v
359
+
360
+ # add zero attention along batch dimension (now first)
361
+ if add_zero_attn:
362
+ zero_attn_shape = (bsz * num_heads, 1, head_dim)
363
+ k = torch.cat(
364
+ [k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1
365
+ )
366
+ v = torch.cat(
367
+ [v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1
368
+ )
369
+ if attn_mask is not None:
370
+ attn_mask = pad(attn_mask, (0, 1))
371
+ if key_padding_mask is not None:
372
+ key_padding_mask = pad(key_padding_mask, (0, 1))
373
+
374
+ # update source sequence length after adjustments
375
+ src_len = k.size(1)
376
+
377
+ # merge key padding and attention masks
378
+ if key_padding_mask is not None:
379
+ assert key_padding_mask.shape == (
380
+ bsz,
381
+ src_len,
382
+ ), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
383
+ key_padding_mask = (
384
+ key_padding_mask.view(bsz, 1, 1, src_len)
385
+ .expand(-1, num_heads, -1, -1)
386
+ .reshape(bsz * num_heads, 1, src_len)
387
+ )
388
+ if attn_mask is None:
389
+ attn_mask = key_padding_mask
390
+ else:
391
+ attn_mask = attn_mask + key_padding_mask
392
+
393
+ # adjust dropout probability
394
+ if not training:
395
+ dropout_p = 0.0
396
+
397
+ #
398
+ # (deep breath) calculate attention and out projection
399
+ #
400
+
401
+ if need_weights:
402
+ B, Nt, E = q.shape
403
+ q_scaled = q / math.sqrt(E)
404
+
405
+ assert not (
406
+ is_causal and attn_mask is None
407
+ ), "FIXME: is_causal not implemented for need_weights"
408
+
409
+ if attn_mask is not None:
410
+ attn_output_weights = torch.baddbmm(
411
+ attn_mask, q_scaled, k.transpose(-2, -1)
412
+ )
413
+ else:
414
+ attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
415
+ attn_output_weights = softmax(attn_output_weights, dim=-1)
416
+ if dropout_p > 0.0:
417
+ attn_output_weights = dropout(attn_output_weights, p=dropout_p)
418
+
419
+ attn_output = torch.bmm(attn_output_weights, v)
420
+
421
+ attn_output = (
422
+ attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
423
+ )
424
+ attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
425
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
426
+
427
+ # optionally average attention weights over heads
428
+ attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
429
+ if average_attn_weights:
430
+ attn_output_weights = attn_output_weights.mean(dim=1)
431
+
432
+ if not is_batched:
433
+ # squeeze the output if input was unbatched
434
+ attn_output = attn_output.squeeze(1)
435
+ attn_output_weights = attn_output_weights.squeeze(0)
436
+ return attn_output, attn_output_weights
437
+ else:
438
+ # attn_mask can be either (L,S) or (N*num_heads, L, S)
439
+ # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
440
+ # in order to match the input for SDPA of (N, num_heads, L, S)
441
+ if attn_mask is not None:
442
+ if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
443
+ attn_mask = attn_mask.unsqueeze(0)
444
+ else:
445
+ attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
446
+
447
+ q = q.view(bsz, num_heads, tgt_len, head_dim)
448
+ k = k.view(bsz, num_heads, src_len, head_dim)
449
+ v = v.view(bsz, num_heads, src_len, head_dim)
450
+
451
+ # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
452
+ attn_output = scaled_dot_product_attention(
453
+ q, k, v, attn_mask, dropout_p, is_causal
454
+ )
455
+
456
+ attn_output = (
457
+ attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
458
+ )
459
+
460
+ attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
461
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
462
+ if not is_batched:
463
+ # squeeze the output if input was unbatched
464
+ attn_output = attn_output.squeeze(1)
465
+ return attn_output, None
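The `cache` argument threaded through multi_head_attention_forward_patched is a plain dict owned by the caller. The sketch below reconstructs the layout implied by the reads and writes above; the field names are exactly those used in the code, while the number of layers and the initial values are illustrative assumptions.

    # Assumed cache layout, inferred from the code above (num_layers is illustrative).
    num_layers = 24
    cache = {
        "all_stage": num_layers,      # one k/v slot per self-attention layer
        "stage": 0,                   # advances (mod all_stage) after every attention call
        "first_infer": 1,             # 1 on the first decoding step, 0 afterwards
        "k": [None] * num_layers,
        "v": [None] * num_layers,
    }
    # First step:   cache["k"][stage] = k
    # Later steps:  cache["k"][stage] = torch.cat([cache["k"][stage], k], 0)
    # so cached keys/values grow along dim 0 (time) and src_len is read back from them.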
GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py ADDED
@@ -0,0 +1,92 @@
1
+ from torch.nn.functional import *
2
+ from torch.nn.functional import (
3
+ _mha_shape_check,
4
+ _canonical_mask,
5
+ _none_or_dtype,
6
+ _in_projection_packed,
7
+ )
8
+
9
+ def multi_head_attention_forward_patched(
10
+ query,
11
+ key,
12
+ value,
13
+ embed_dim_to_check: int,
14
+ num_heads: int,
15
+ in_proj_weight,
16
+ in_proj_bias: Optional[Tensor],
17
+ bias_k: Optional[Tensor],
18
+ bias_v: Optional[Tensor],
19
+ add_zero_attn: bool,
20
+ dropout_p: float,
21
+ out_proj_weight: Tensor,
22
+ out_proj_bias: Optional[Tensor],
23
+ training: bool = True,
24
+ key_padding_mask: Optional[Tensor] = None,
25
+ need_weights: bool = True,
26
+ attn_mask: Optional[Tensor] = None,
27
+ use_separate_proj_weight: bool = False,
28
+ q_proj_weight: Optional[Tensor] = None,
29
+ k_proj_weight: Optional[Tensor] = None,
30
+ v_proj_weight: Optional[Tensor] = None,
31
+ static_k: Optional[Tensor] = None,
32
+ static_v: Optional[Tensor] = None,
33
+ average_attn_weights: bool = True,
34
+ is_causal: bool = False,
35
+ cache=None,
36
+ ) -> Tuple[Tensor, Optional[Tensor]]:
37
+
38
+ # set up shape vars
39
+ _, _, embed_dim = query.shape
40
+ attn_mask = _canonical_mask(
41
+ mask=attn_mask,
42
+ mask_name="attn_mask",
43
+ other_type=None,
44
+ other_name="",
45
+ target_type=query.dtype,
46
+ check_other=False,
47
+ )
48
+ head_dim = embed_dim // num_heads
49
+
50
+ proj_qkv = linear(query, in_proj_weight, in_proj_bias)
51
+ proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
52
+ q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2]
53
+
54
+ if cache["first_infer"] == 1:
55
+ cache["k"][cache["stage"]] = k
56
+ cache["v"][cache["stage"]] = v
57
+ else:
58
+ cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]][:-1], k], 0)
59
+ cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]][:-1], v], 0)
60
+ k = cache["k"][cache["stage"]]
61
+ v = cache["v"][cache["stage"]]
62
+ cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]
63
+
64
+ attn_mask = _canonical_mask(
65
+ mask=attn_mask,
66
+ mask_name="attn_mask",
67
+ other_type=None,
68
+ other_name="",
69
+ target_type=q.dtype,
70
+ check_other=False,
71
+ )
72
+ attn_mask = attn_mask.unsqueeze(0)
73
+
74
+ q = q.view(-1, num_heads, head_dim).transpose(0, 1)
75
+ k = k.view(-1, num_heads, head_dim).transpose(0, 1)
76
+ v = v.view(-1, num_heads, head_dim).transpose(0, 1)
77
+
78
+ dropout_p = 0.0
79
+ attn_mask = attn_mask.unsqueeze(0)
80
+ q = q.view(num_heads, -1, head_dim).unsqueeze(0)
81
+ k = k.view(num_heads, -1, head_dim).unsqueeze(0)
82
+ v = v.view(num_heads, -1, head_dim).unsqueeze(0)
83
+ attn_output = scaled_dot_product_attention(
84
+ q, k, v, attn_mask, dropout_p, is_causal
85
+ )
86
+ attn_output = (
87
+ attn_output.permute(2, 0, 1, 3).contiguous().view(-1, embed_dim)
88
+ )
89
+ attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
90
+ attn_output = attn_output.view(-1, 1, attn_output.size(1))
91
+
92
+ return attn_output
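This export-oriented variant fuses the q/k/v projection into a single linear layer and splits the result with unflatten/transpose, then always takes the scaled_dot_product_attention path. The snippet below simply checks that the reshape used above yields three (L, 1, E) tensors; all sizes are placeholders, not values taken from the model.

    # Sanity check of the fused q/k/v split used above (sizes are placeholders).
    import torch
    from torch.nn.functional import linear

    L, E = 1, 512                                  # one new token, embedding width
    query = torch.randn(L, 1, E)
    in_proj_weight = torch.randn(3 * E, E)         # fused qkv projection
    in_proj_bias = torch.zeros(3 * E)

    proj = linear(query, in_proj_weight, in_proj_bias)                 # (L, 1, 3E)
    proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
    q, k, v = proj[0], proj[1], proj[2]
    assert q.shape == k.shape == v.shape == (L, 1, E)

Note also that the cache update here drops the last cached position before appending the new k/v, which keeps the cached length constant; presumably the export-side decoding loop pre-allocates one spare slot per step.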
GPT_SoVITS/AR/modules/scaling.py ADDED
@@ -0,0 +1,335 @@
1
+ # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
2
+ #
3
+ # See ../../../../LICENSE for clarification regarding multiple authors
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import logging
17
+ import math
18
+ import random
19
+ from typing import Optional
20
+ from typing import Tuple
21
+ from typing import Union
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ from torch import Tensor
26
+
27
+
28
+ class DoubleSwishFunction(torch.autograd.Function):
29
+ """
30
+ double_swish(x) = x * torch.sigmoid(x-1)
31
+ This is a definition, originally motivated by its close numerical
32
+ similarity to swish(swish(x)), where swish(x) = x * sigmoid(x).
33
+
34
+ Memory-efficient derivative computation:
35
+ double_swish(x) = x * s, where s(x) = torch.sigmoid(x-1)
36
+ double_swish'(x) = d/dx double_swish(x) = x * s'(x) + x' * s(x) = x * s'(x) + s(x).
37
+ Now, s'(x) = s(x) * (1-s(x)).
38
+ double_swish'(x) = x * s'(x) + s(x).
39
+ = x * s(x) * (1-s(x)) + s(x).
40
+ = double_swish(x) * (1-s(x)) + s(x)
41
+ ... so we just need to remember s(x) but not x itself.
42
+ """
43
+
44
+ @staticmethod
45
+ def forward(ctx, x: Tensor) -> Tensor:
46
+ requires_grad = x.requires_grad
47
+ x_dtype = x.dtype
48
+ if x.dtype == torch.float16:
49
+ x = x.to(torch.float32)
50
+
51
+ s = torch.sigmoid(x - 1.0)
52
+ y = x * s
53
+
54
+ if requires_grad:
55
+ deriv = y * (1 - s) + s
56
+ # notes on derivative of x * sigmoid(x - 1):
57
+ # https://www.wolframalpha.com/input?i=d%2Fdx+%28x+*+sigmoid%28x-1%29%29
58
+ # min \simeq -0.043638. Take floor as -0.043637 so it's a lower bound.
59
+ # max \simeq 1.1990. Take ceil to be 1.2 so it's an upper bound.
60
+ # the combination of "+ torch.rand_like(deriv)" and casting to torch.uint8 (which
61
+ # floors), should be expectation-preserving.
62
+ floor = -0.043637
63
+ ceil = 1.2
64
+ d_scaled = (deriv - floor) * (255.0 / (ceil - floor)) + torch.rand_like(
65
+ deriv
66
+ )
67
+ if __name__ == "__main__":
68
+ # for self-testing only.
69
+ assert d_scaled.min() >= 0.0
70
+ assert d_scaled.max() < 256.0
71
+ d_int = d_scaled.to(torch.uint8)
72
+ ctx.save_for_backward(d_int)
73
+ if x.dtype == torch.float16 or torch.is_autocast_enabled():
74
+ y = y.to(torch.float16)
75
+ return y
76
+
77
+ @staticmethod
78
+ def backward(ctx, y_grad: Tensor) -> Tensor:
79
+ (d,) = ctx.saved_tensors
80
+ # the same constants as used in forward pass.
81
+ floor = -0.043637
82
+ ceil = 1.2
83
+ d = d * ((ceil - floor) / 255.0) + floor
84
+ return y_grad * d
85
+
86
+
87
+ class DoubleSwish(torch.nn.Module):
88
+ def forward(self, x: Tensor) -> Tensor:
89
+ """Return double-swish activation function which is an approximation to Swish(Swish(x)),
90
+ that we approximate closely with x * sigmoid(x-1).
91
+ """
92
+ if torch.jit.is_scripting() or torch.jit.is_tracing():
93
+ return x * torch.sigmoid(x - 1.0)
94
+ return DoubleSwishFunction.apply(x)
95
+
96
+
97
+ class ActivationBalancerFunction(torch.autograd.Function):
98
+ @staticmethod
99
+ def forward(
100
+ ctx,
101
+ x: Tensor,
102
+ scale_factor: Tensor,
103
+ sign_factor: Optional[Tensor],
104
+ channel_dim: int,
105
+ ) -> Tensor:
106
+ if channel_dim < 0:
107
+ channel_dim += x.ndim
108
+ ctx.channel_dim = channel_dim
109
+ xgt0 = x > 0
110
+ if sign_factor is None:
111
+ ctx.save_for_backward(xgt0, scale_factor)
112
+ else:
113
+ ctx.save_for_backward(xgt0, scale_factor, sign_factor)
114
+ return x
115
+
116
+ @staticmethod
117
+ def backward(ctx, x_grad: Tensor) -> Tuple[Tensor, None, None, None]:
118
+ if len(ctx.saved_tensors) == 3:
119
+ xgt0, scale_factor, sign_factor = ctx.saved_tensors
120
+ for _ in range(ctx.channel_dim, x_grad.ndim - 1):
121
+ scale_factor = scale_factor.unsqueeze(-1)
122
+ sign_factor = sign_factor.unsqueeze(-1)
123
+ factor = sign_factor + scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
124
+ else:
125
+ xgt0, scale_factor = ctx.saved_tensors
126
+ for _ in range(ctx.channel_dim, x_grad.ndim - 1):
127
+ scale_factor = scale_factor.unsqueeze(-1)
128
+ factor = scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
129
+ neg_delta_grad = x_grad.abs() * factor
130
+ return (
131
+ x_grad - neg_delta_grad,
132
+ None,
133
+ None,
134
+ None,
135
+ )
136
+
137
+
138
+ def _compute_scale_factor(
139
+ x: Tensor,
140
+ channel_dim: int,
141
+ min_abs: float,
142
+ max_abs: float,
143
+ gain_factor: float,
144
+ max_factor: float,
145
+ ) -> Tensor:
146
+ if channel_dim < 0:
147
+ channel_dim += x.ndim
148
+ sum_dims = [d for d in range(x.ndim) if d != channel_dim]
149
+ x_abs_mean = torch.mean(x.abs(), dim=sum_dims).to(torch.float32)
150
+
151
+ if min_abs == 0.0:
152
+ below_threshold = 0.0
153
+ else:
154
+ # below_threshold is 0 if x_abs_mean > min_abs, can be at most max_factor if
155
+ # x_abs_mean < min_abs.
156
+ below_threshold = ((min_abs - x_abs_mean) * (gain_factor / min_abs)).clamp(
157
+ min=0, max=max_factor
158
+ )
159
+
160
+ above_threshold = ((x_abs_mean - max_abs) * (gain_factor / max_abs)).clamp(
161
+ min=0, max=max_factor
162
+ )
163
+
164
+ return below_threshold - above_threshold
165
+
166
+
167
+ def _compute_sign_factor(
168
+ x: Tensor,
169
+ channel_dim: int,
170
+ min_positive: float,
171
+ max_positive: float,
172
+ gain_factor: float,
173
+ max_factor: float,
174
+ ) -> Tensor:
175
+ if channel_dim < 0:
176
+ channel_dim += x.ndim
177
+ sum_dims = [d for d in range(x.ndim) if d != channel_dim]
178
+ proportion_positive = torch.mean((x > 0).to(torch.float32), dim=sum_dims)
179
+ if min_positive == 0.0:
180
+ factor1 = 0.0
181
+ else:
182
+ # 0 if proportion_positive >= min_positive, else can be
183
+ # as large as max_factor.
184
+ factor1 = (
185
+ (min_positive - proportion_positive) * (gain_factor / min_positive)
186
+ ).clamp_(min=0, max=max_factor)
187
+
188
+ if max_positive == 1.0:
189
+ factor2 = 0.0
190
+ else:
191
+ # 0 if self.proportion_positive <= max_positive, else can be
192
+ # as large as -max_factor.
193
+ factor2 = (
194
+ (proportion_positive - max_positive) * (gain_factor / (1.0 - max_positive))
195
+ ).clamp_(min=0, max=max_factor)
196
+ sign_factor = factor1 - factor2
197
+ # require min_positive != 0 or max_positive != 1:
198
+ assert not isinstance(sign_factor, float)
199
+ return sign_factor
200
+
201
+
202
+ class ActivationBalancer(torch.nn.Module):
203
+ """
204
+ Modifies the backpropped derivatives of a function to try to encourage, for
205
+ each channel, that it is positive at least a proportion `threshold` of the
206
+ time. It does this by multiplying negative derivative values by up to
207
+ (1+max_factor), and positive derivative values by up to (1-max_factor),
208
+ interpolated from 1 at the threshold to those extremal values when none
209
+ of the inputs are positive.
210
+
211
+ Args:
212
+ num_channels: the number of channels
213
+ channel_dim: the dimension/axis corresponding to the channel, e.g.
214
+ -1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
215
+ min_positive: the minimum, per channel, of the proportion of the time
216
+ that (x > 0), below which we start to modify the derivatives.
217
+ max_positive: the maximum, per channel, of the proportion of the time
218
+ that (x > 0), above which we start to modify the derivatives.
219
+ max_factor: the maximum factor by which we modify the derivatives for
220
+ either the sign constraint or the magnitude constraint;
221
+ e.g. with max_factor=0.02, the derivatives would be multiplied by
222
+ values in the range [0.98..1.02].
223
+ sign_gain_factor: determines the 'gain' with which we increase the
224
+ change in gradient once the constraints on min_positive and max_positive
225
+ are violated.
226
+ scale_gain_factor: determines the 'gain' with which we increase the
227
+ change in gradient once the constraints on min_abs and max_abs
228
+ are violated.
229
+ min_abs: the minimum average-absolute-value difference from the mean
230
+ value per channel, which we allow, before we start to modify
231
+ the derivatives to prevent this.
232
+ max_abs: the maximum average-absolute-value difference from the mean
233
+ value per channel, which we allow, before we start to modify
234
+ the derivatives to prevent this.
235
+ min_prob: determines the minimum probability with which we modify the
236
+ gradients for the {min,max}_positive and {min,max}_abs constraints,
237
+ on each forward(). This is done randomly to prevent all layers
238
+ from doing it at the same time. Early in training we may use
239
+ higher probabilities than this; it will decay to this value.
240
+ """
241
+
242
+ def __init__(
243
+ self,
244
+ num_channels: int,
245
+ channel_dim: int,
246
+ min_positive: float = 0.05,
247
+ max_positive: float = 0.95,
248
+ max_factor: float = 0.04,
249
+ sign_gain_factor: float = 0.01,
250
+ scale_gain_factor: float = 0.02,
251
+ min_abs: float = 0.2,
252
+ max_abs: float = 100.0,
253
+ min_prob: float = 0.1,
254
+ ):
255
+ super(ActivationBalancer, self).__init__()
256
+ self.num_channels = num_channels
257
+ self.channel_dim = channel_dim
258
+ self.min_positive = min_positive
259
+ self.max_positive = max_positive
260
+ self.max_factor = max_factor
261
+ self.min_abs = min_abs
262
+ self.max_abs = max_abs
263
+ self.min_prob = min_prob
264
+ self.sign_gain_factor = sign_gain_factor
265
+ self.scale_gain_factor = scale_gain_factor
266
+
267
+ # count measures how many times the forward() function has been called.
268
+ # We occasionally sync this to a tensor called `count`, that exists to
269
+ # make sure it is synced to disk when we load and save the model.
270
+ self.cpu_count = 0
271
+ self.register_buffer("count", torch.tensor(0, dtype=torch.int64))
272
+
273
+ def forward(self, x: Tensor) -> Tensor:
274
+ if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
275
+ return _no_op(x)
276
+
277
+ count = self.cpu_count
278
+ self.cpu_count += 1
279
+
280
+ if random.random() < 0.01:
281
+ # Occasionally sync self.cpu_count with self.count.
282
+ # count affects the decay of 'prob'. don't do this on every iter,
283
+ # because syncing with the GPU is slow.
284
+ self.cpu_count = max(self.cpu_count, self.count.item())
285
+ self.count.fill_(self.cpu_count)
286
+
287
+ # the prob of doing some work exponentially decreases from 0.5 till it hits
288
+ # a floor at min_prob (==0.1, by default)
289
+ prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))
290
+
291
+ if random.random() < prob:
292
+ sign_gain_factor = 0.5
293
+ if self.min_positive != 0.0 or self.max_positive != 1.0:
294
+ sign_factor = _compute_sign_factor(
295
+ x,
296
+ self.channel_dim,
297
+ self.min_positive,
298
+ self.max_positive,
299
+ gain_factor=self.sign_gain_factor / prob,
300
+ max_factor=self.max_factor,
301
+ )
302
+ else:
303
+ sign_factor = None
304
+
305
+ scale_factor = _compute_scale_factor(
306
+ x.detach(),
307
+ self.channel_dim,
308
+ min_abs=self.min_abs,
309
+ max_abs=self.max_abs,
310
+ gain_factor=self.scale_gain_factor / prob,
311
+ max_factor=self.max_factor,
312
+ )
313
+ return ActivationBalancerFunction.apply(
314
+ x,
315
+ scale_factor,
316
+ sign_factor,
317
+ self.channel_dim,
318
+ )
319
+ else:
320
+ return _no_op(x)
321
+
322
+
323
+ def BalancedDoubleSwish(
324
+ d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25
325
+ ) -> nn.Sequential:
326
+ """
327
+ ActivationBalancer -> DoubleSwish
328
+ """
329
+ balancer = ActivationBalancer(
330
+ d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob
331
+ )
332
+ return nn.Sequential(
333
+ balancer,
334
+ DoubleSwish(),
335
+ )
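The DoubleSwishFunction above relies on the identity double_swish'(x) = double_swish(x) * (1 - s(x)) + s(x), with s(x) = sigmoid(x - 1), which is what lets the backward pass store only a quantized copy of the derivative instead of x. A small, self-contained check of that identity against autograd (illustrative only, not part of the module):

    # Compare the closed-form derivative from the docstring with autograd.
    import torch

    x = torch.linspace(-4.0, 4.0, steps=101, requires_grad=True)
    s = torch.sigmoid(x - 1.0)
    y = x * s                                   # double_swish(x)
    y.sum().backward()                          # fills x.grad with d/dx double_swish(x)

    closed_form = y.detach() * (1 - s.detach()) + s.detach()
    assert torch.allclose(x.grad, closed_form, atol=1e-5)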
GPT_SoVITS/AR/modules/transformer.py ADDED
@@ -0,0 +1,378 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
2
+ import copy
3
+ import numbers
4
+ from functools import partial
5
+ from typing import Any
6
+ from typing import Callable
7
+ from typing import List
8
+ from typing import Optional
9
+ from typing import Tuple
10
+ from typing import Union
11
+
12
+ import torch
13
+ from AR.modules.activation import MultiheadAttention
14
+ from AR.modules.scaling import BalancedDoubleSwish
15
+ from torch import nn
16
+ from torch import Tensor
17
+ from torch.nn import functional as F
18
+
19
+ _shape_t = Union[int, List[int], torch.Size]
20
+
21
+
22
+ class LayerNorm(nn.Module):
23
+ __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
24
+ normalized_shape: Tuple[int, ...]
25
+ eps: float
26
+ elementwise_affine: bool
27
+
28
+ def __init__(
29
+ self,
30
+ normalized_shape: _shape_t,
31
+ eps: float = 1e-5,
32
+ elementwise_affine: bool = True,
33
+ device=None,
34
+ dtype=None,
35
+ ) -> None:
36
+ factory_kwargs = {"device": device, "dtype": dtype}
37
+ super(LayerNorm, self).__init__()
38
+ if isinstance(normalized_shape, numbers.Integral):
39
+ # mypy error: incompatible types in assignment
40
+ normalized_shape = (normalized_shape,) # type: ignore[assignment]
41
+ self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
42
+ self.eps = eps
43
+ self.elementwise_affine = elementwise_affine
44
+ if self.elementwise_affine:
45
+ self.weight = nn.Parameter(
46
+ torch.empty(self.normalized_shape, **factory_kwargs)
47
+ )
48
+ self.bias = nn.Parameter(
49
+ torch.empty(self.normalized_shape, **factory_kwargs)
50
+ )
51
+ else:
52
+ self.register_parameter("weight", None)
53
+ self.register_parameter("bias", None)
54
+
55
+ self.reset_parameters()
56
+
57
+ def reset_parameters(self) -> None:
58
+ if self.elementwise_affine:
59
+ nn.init.ones_(self.weight)
60
+ nn.init.zeros_(self.bias)
61
+
62
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
63
+ if isinstance(input, tuple):
64
+ input, embedding = input
65
+ return (
66
+ F.layer_norm(
67
+ input,
68
+ self.normalized_shape,
69
+ self.weight,
70
+ self.bias,
71
+ self.eps,
72
+ ),
73
+ embedding,
74
+ )
75
+
76
+ assert embedding is None
77
+ return F.layer_norm(
78
+ input, self.normalized_shape, self.weight, self.bias, self.eps
79
+ )
80
+
81
+ def extra_repr(self) -> str:
82
+ return (
83
+ "{normalized_shape}, eps={eps}, "
84
+ "elementwise_affine={elementwise_affine}".format(**self.__dict__)
85
+ )
86
+
87
+
88
+ class IdentityNorm(nn.Module):
89
+ def __init__(
90
+ self,
91
+ d_model: int,
92
+ eps: float = 1e-5,
93
+ device=None,
94
+ dtype=None,
95
+ ) -> None:
96
+ super(IdentityNorm, self).__init__()
97
+
98
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
99
+ if isinstance(input, tuple):
100
+ return input
101
+
102
+ assert embedding is None
103
+ return input
104
+
105
+
106
+ class TransformerEncoder(nn.Module):
107
+ r"""TransformerEncoder is a stack of N encoder layers. Users can build the
108
+ BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
109
+
110
+ Args:
111
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
112
+ num_layers: the number of sub-encoder-layers in the encoder (required).
113
+ norm: the layer normalization component (optional).
114
+ enable_nested_tensor: if True, input will automatically convert to nested tensor
115
+ (and convert back on output). This will improve the overall performance of
116
+ TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
117
+
118
+ Examples::
119
+ >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
120
+ >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
121
+ >>> src = torch.rand(10, 32, 512)
122
+ >>> out = transformer_encoder(src)
123
+ """
124
+ __constants__ = ["norm"]
125
+
126
+ def __init__(self, encoder_layer, num_layers, norm=None):
127
+ super(TransformerEncoder, self).__init__()
128
+ self.layers = _get_clones(encoder_layer, num_layers)
129
+ self.num_layers = num_layers
130
+ self.norm = norm
131
+
132
+ def forward(
133
+ self,
134
+ src: Tensor,
135
+ mask: Optional[Tensor] = None,
136
+ src_key_padding_mask: Optional[Tensor] = None,
137
+ return_layer_states: bool = False,
138
+ cache=None,
139
+ ) -> Tensor:
140
+ r"""Pass the input through the encoder layers in turn.
141
+
142
+ Args:
143
+ src: the sequence to the encoder (required).
144
+ mask: the mask for the src sequence (optional).
145
+ src_key_padding_mask: the mask for the src keys per batch (optional).
146
+ return_layer_states: return layers' state (optional).
147
+
148
+ Shape:
149
+ see the docs in Transformer class.
150
+ """
151
+ if return_layer_states:
152
+ layer_states = [] # layers' output
153
+ output = src
154
+ for mod in self.layers:
155
+ output = mod(
156
+ output,
157
+ src_mask=mask,
158
+ src_key_padding_mask=src_key_padding_mask,
159
+ cache=cache,
160
+ )
161
+ layer_states.append(output[0])
162
+
163
+ if self.norm is not None:
164
+ output = self.norm(output)
165
+
166
+ return layer_states, output
167
+
168
+ output = src
169
+ for mod in self.layers:
170
+ output = mod(
171
+ output,
172
+ src_mask=mask,
173
+ src_key_padding_mask=src_key_padding_mask,
174
+ cache=cache,
175
+ )
176
+
177
+ if self.norm is not None:
178
+ output = self.norm(output)
179
+
180
+ return output
181
+
182
+
183
+ class TransformerEncoderLayer(nn.Module):
184
+ __constants__ = ["batch_first", "norm_first"]
185
+
186
+ def __init__(
187
+ self,
188
+ d_model: int,
189
+ nhead: int,
190
+ dim_feedforward: int = 2048,
191
+ dropout: float = 0.1,
192
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
193
+ batch_first: bool = False,
194
+ norm_first: bool = False,
195
+ device=None,
196
+ dtype=None,
197
+ linear1_self_attention_cls: nn.Module = nn.Linear,
198
+ linear2_self_attention_cls: nn.Module = nn.Linear,
199
+ linear1_feedforward_cls: nn.Module = nn.Linear,
200
+ linear2_feedforward_cls: nn.Module = nn.Linear,
201
+ layer_norm_cls: nn.Module = LayerNorm,
202
+ layer_norm_eps: float = 1e-5,
203
+ adaptive_layer_norm=False,
204
+ ) -> None:
205
+ factory_kwargs = {"device": device, "dtype": dtype}
206
+ super(TransformerEncoderLayer, self).__init__()
207
+ # print(233333333333,d_model,nhead)
208
+ # import os
209
+ # os._exit(2333333)
210
+ self.self_attn = MultiheadAttention(
211
+ d_model, # 512 16
212
+ nhead,
213
+ dropout=dropout,
214
+ batch_first=batch_first,
215
+ linear1_cls=linear1_self_attention_cls,
216
+ linear2_cls=linear2_self_attention_cls,
217
+ **factory_kwargs,
218
+ )
219
+
220
+ # Implementation of Feedforward model
221
+ self.linear1 = linear1_feedforward_cls(
222
+ d_model, dim_feedforward, **factory_kwargs
223
+ )
224
+ self.dropout = nn.Dropout(dropout)
225
+ self.linear2 = linear2_feedforward_cls(
226
+ dim_feedforward, d_model, **factory_kwargs
227
+ )
228
+
229
+ self.norm_first = norm_first
230
+ self.dropout1 = nn.Dropout(dropout)
231
+ self.dropout2 = nn.Dropout(dropout)
232
+
233
+ # Legacy string support for activation function.
234
+ if isinstance(activation, str):
235
+ activation = _get_activation_fn(activation)
236
+ elif isinstance(activation, partial):
237
+ activation = activation(d_model)
238
+ elif activation == BalancedDoubleSwish:
239
+ activation = BalancedDoubleSwish(d_model)
240
+
241
+ # # We can't test self.activation in forward() in TorchScript,
242
+ # # so stash some information about it instead.
243
+ # if activation is F.relu or isinstance(activation, torch.nn.ReLU):
244
+ # self.activation_relu_or_gelu = 1
245
+ # elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
246
+ # self.activation_relu_or_gelu = 2
247
+ # else:
248
+ # self.activation_relu_or_gelu = 0
249
+ self.activation = activation
250
+
251
+ norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
252
+ if layer_norm_cls == IdentityNorm:
253
+ norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
254
+ else:
255
+ norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
256
+
257
+ if adaptive_layer_norm:
258
+ self.norm1 = AdaptiveLayerNorm(d_model, norm1)
259
+ self.norm2 = AdaptiveLayerNorm(d_model, norm2)
260
+ else:
261
+ self.norm1 = norm1
262
+ self.norm2 = norm2
263
+
264
+ def __setstate__(self, state):
265
+ super(TransformerEncoderLayer, self).__setstate__(state)
266
+ if not hasattr(self, "activation"):
267
+ self.activation = F.relu
268
+
269
+ def forward(
270
+ self,
271
+ src: Tensor,
272
+ src_mask: Optional[Tensor] = None,
273
+ src_key_padding_mask: Optional[Tensor] = None,
274
+ cache=None,
275
+ ) -> Tensor:
276
+ r"""Pass the input through the encoder layer.
277
+
278
+ Args:
279
+ src: the sequence to the encoder layer (required).
280
+ src_mask: the mask for the src sequence (optional).
281
+ src_key_padding_mask: the mask for the src keys per batch (optional).
282
+
283
+ Shape:
284
+ see the docs in Transformer class.
285
+ """
286
+ x, stage_embedding = src, None
287
+ is_src_tuple = False
288
+ if isinstance(src, tuple):
289
+ x, stage_embedding = src
290
+ is_src_tuple = True
291
+
292
+ if src_key_padding_mask is not None:
293
+ _skpm_dtype = src_key_padding_mask.dtype
294
+ if _skpm_dtype != torch.bool and not torch.is_floating_point(
295
+ src_key_padding_mask
296
+ ):
297
+ raise AssertionError(
298
+ "only bool and floating types of key_padding_mask are supported"
299
+ )
300
+
301
+ if self.norm_first:
302
+ x = x + self._sa_block(
303
+ self.norm1(x, stage_embedding),
304
+ src_mask,
305
+ src_key_padding_mask,
306
+ cache=cache,
307
+ )
308
+ x = x + self._ff_block(self.norm2(x, stage_embedding))
309
+ else:
310
+ x = self.norm1(
311
+ x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
312
+ stage_embedding,
313
+ )
314
+ x = self.norm2(x + self._ff_block(x), stage_embedding)
315
+
316
+ if is_src_tuple:
317
+ return (x, stage_embedding)
318
+ return x
319
+
320
+ # self-attention block
321
+ def _sa_block(
322
+ self,
323
+ x: Tensor,
324
+ attn_mask: Optional[Tensor],
325
+ key_padding_mask: Optional[Tensor],
326
+ cache=None,
327
+ ) -> Tensor:
328
+ # print(x.shape,attn_mask.shape,key_padding_mask)
329
+ # torch.Size([1, 188, 512]) torch.Size([188, 188]) None
330
+ # import os
331
+ # os._exit(23333)
332
+ x = self.self_attn(
333
+ x,
334
+ x,
335
+ x,
336
+ attn_mask=attn_mask,
337
+ key_padding_mask=key_padding_mask,
338
+ need_weights=False,
339
+ cache=cache,
340
+ )[0]
341
+ return self.dropout1(x)
342
+
343
+ # feed forward block
344
+ def _ff_block(self, x: Tensor) -> Tensor:
345
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
346
+ return self.dropout2(x)
347
+
348
+
349
+ class AdaptiveLayerNorm(nn.Module):
350
+ r"""Adaptive Layer Normalization"""
351
+
352
+ def __init__(self, d_model, norm) -> None:
353
+ super(AdaptiveLayerNorm, self).__init__()
354
+ self.project_layer = nn.Linear(d_model, 2 * d_model)
355
+ self.norm = norm
356
+ self.d_model = d_model
357
+ self.eps = self.norm.eps
358
+
359
+ def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
360
+ if isinstance(input, tuple):
361
+ input, embedding = input
362
+ weight, bias = torch.split(
363
+ self.project_layer(embedding),
364
+ split_size_or_sections=self.d_model,
365
+ dim=-1,
366
+ )
367
+ return (weight * self.norm(input) + bias, embedding)
368
+
369
+ weight, bias = torch.split(
370
+ self.project_layer(embedding),
371
+ split_size_or_sections=self.d_model,
372
+ dim=-1,
373
+ )
374
+ return weight * self.norm(input) + bias
375
+
376
+
377
+ def _get_clones(module, N):
378
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
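A minimal usage sketch of the encoder stack defined in `transformer.py` above; the `d_model`/`nhead`/sequence sizes are illustrative assumptions, and it assumes the GPT_SoVITS package root is on `PYTHONPATH` so the `AR.modules.*` imports resolve:

```python
# Minimal sketch (illustrative sizes): wiring up the encoder defined in transformer.py.
# Assumes the GPT_SoVITS package root is on PYTHONPATH so AR.modules.* imports resolve.
import torch
from AR.modules.transformer import LayerNorm, TransformerEncoder, TransformerEncoderLayer

layer = TransformerEncoderLayer(
    d_model=512,
    nhead=8,
    dim_feedforward=2048,
    batch_first=True,
    norm_first=True,          # pre-norm residual blocks with plain LayerNorm (no stage embedding)
    layer_norm_cls=LayerNorm,
)
encoder = TransformerEncoder(layer, num_layers=6)

src = torch.randn(1, 188, 512)   # (batch, seq_len, d_model) because batch_first=True
out = encoder(src)               # output keeps the same shape as src
print(out.shape)
```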
GPT_SoVITS/AR/modules/transformer_onnx.py ADDED
@@ -0,0 +1,292 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
2
+ import copy
3
+ import numbers
4
+ from functools import partial
5
+ from typing import Any
6
+ from typing import Callable
7
+ from typing import List
8
+ from typing import Optional
9
+ from typing import Tuple
10
+ from typing import Union
11
+
12
+ import torch
13
+ from AR.modules.activation_onnx import MultiheadAttention
14
+ from AR.modules.scaling import BalancedDoubleSwish
15
+ from torch import nn
16
+ from torch import Tensor
17
+ from torch.nn import functional as F
18
+
19
+ _shape_t = Union[int, List[int], torch.Size]
20
+
21
+
22
+ class LayerNorm(nn.Module):
23
+ __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
24
+ normalized_shape: Tuple[int, ...]
25
+ eps: float
26
+ elementwise_affine: bool
27
+
28
+ def __init__(
29
+ self,
30
+ normalized_shape: _shape_t,
31
+ eps: float = 1e-5,
32
+ elementwise_affine: bool = True,
33
+ device=None,
34
+ dtype=None,
35
+ ) -> None:
36
+ factory_kwargs = {"device": device, "dtype": dtype}
37
+ super(LayerNorm, self).__init__()
38
+ if isinstance(normalized_shape, numbers.Integral):
39
+ # mypy error: incompatible types in assignment
40
+ normalized_shape = (normalized_shape,) # type: ignore[assignment]
41
+ self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
42
+ self.eps = eps
43
+ self.elementwise_affine = elementwise_affine
44
+ if self.elementwise_affine:
45
+ self.weight = nn.Parameter(
46
+ torch.empty(self.normalized_shape, **factory_kwargs)
47
+ )
48
+ self.bias = nn.Parameter(
49
+ torch.empty(self.normalized_shape, **factory_kwargs)
50
+ )
51
+ else:
52
+ self.register_parameter("weight", None)
53
+ self.register_parameter("bias", None)
54
+
55
+ self.reset_parameters()
56
+
57
+ def reset_parameters(self) -> None:
58
+ if self.elementwise_affine:
59
+ nn.init.ones_(self.weight)
60
+ nn.init.zeros_(self.bias)
61
+
62
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
63
+ if isinstance(input, tuple):
64
+ input, embedding = input
65
+ return (
66
+ F.layer_norm(
67
+ input,
68
+ self.normalized_shape,
69
+ self.weight,
70
+ self.bias,
71
+ self.eps,
72
+ ),
73
+ embedding,
74
+ )
75
+
76
+ assert embedding is None
77
+ return F.layer_norm(
78
+ input, self.normalized_shape, self.weight, self.bias, self.eps
79
+ )
80
+
81
+ def extra_repr(self) -> str:
82
+ return (
83
+ "{normalized_shape}, eps={eps}, "
84
+ "elementwise_affine={elementwise_affine}".format(**self.__dict__)
85
+ )
86
+
87
+
88
+ class IdentityNorm(nn.Module):
89
+ def __init__(
90
+ self,
91
+ d_model: int,
92
+ eps: float = 1e-5,
93
+ device=None,
94
+ dtype=None,
95
+ ) -> None:
96
+ super(IdentityNorm, self).__init__()
97
+
98
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
99
+ if isinstance(input, tuple):
100
+ return input
101
+
102
+ assert embedding is None
103
+ return input
104
+
105
+
106
+ class TransformerEncoder(nn.Module):
107
+ r"""TransformerEncoder is a stack of N encoder layers. Users can build the
108
+ BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
109
+
110
+ Args:
111
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
112
+ num_layers: the number of sub-encoder-layers in the encoder (required).
113
+ norm: the layer normalization component (optional).
114
+ enable_nested_tensor: if True, input will automatically convert to nested tensor
115
+ (and convert back on output). This will improve the overall performance of
116
+ TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
117
+
118
+ Examples::
119
+ >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
120
+ >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
121
+ >>> src = torch.rand(10, 32, 512)
122
+ >>> out = transformer_encoder(src)
123
+ """
124
+ __constants__ = ["norm"]
125
+
126
+ def __init__(self, encoder_layer, num_layers, norm=None):
127
+ super(TransformerEncoder, self).__init__()
128
+ self.layers = _get_clones(encoder_layer, num_layers)
129
+ self.num_layers = num_layers
130
+ self.norm = norm
131
+
132
+ def forward(
133
+ self,
134
+ src: Tensor,
135
+ mask: Optional[Tensor] = None,
136
+ src_key_padding_mask: Optional[Tensor] = None,
137
+ return_layer_states: bool = False,
138
+ cache=None,
139
+ ) -> Tensor:
140
+ output = src
141
+ for mod in self.layers:
142
+ output = mod(
143
+ output,
144
+ src_mask=mask,
145
+ src_key_padding_mask=src_key_padding_mask,
146
+ cache=cache,
147
+ )
148
+
149
+ if self.norm is not None:
150
+ output = self.norm(output)
151
+
152
+ return output
153
+
154
+
155
+ class TransformerEncoderLayer(nn.Module):
156
+ __constants__ = ["batch_first", "norm_first"]
157
+ def __init__(
158
+ self,
159
+ d_model: int,
160
+ nhead: int,
161
+ dim_feedforward: int = 2048,
162
+ dropout: float = 0.1,
163
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
164
+ batch_first: bool = False,
165
+ norm_first: bool = False,
166
+ device=None,
167
+ dtype=None,
168
+ linear1_self_attention_cls: nn.Module = nn.Linear,
169
+ linear2_self_attention_cls: nn.Module = nn.Linear,
170
+ linear1_feedforward_cls: nn.Module = nn.Linear,
171
+ linear2_feedforward_cls: nn.Module = nn.Linear,
172
+ layer_norm_cls: nn.Module = LayerNorm,
173
+ layer_norm_eps: float = 1e-5,
174
+ adaptive_layer_norm=False,
175
+ ) -> None:
176
+ factory_kwargs = {"device": device, "dtype": dtype}
177
+ super(TransformerEncoderLayer, self).__init__()
178
+ self.self_attn = MultiheadAttention(
179
+ d_model, # 512 16
180
+ nhead,
181
+ dropout=dropout,
182
+ batch_first=batch_first,
183
+ linear1_cls=linear1_self_attention_cls,
184
+ linear2_cls=linear2_self_attention_cls,
185
+ **factory_kwargs,
186
+ )
187
+ self.linear1 = linear1_feedforward_cls(
188
+ d_model, dim_feedforward, **factory_kwargs
189
+ )
190
+ self.dropout = nn.Dropout(dropout)
191
+ self.linear2 = linear2_feedforward_cls(
192
+ dim_feedforward, d_model, **factory_kwargs
193
+ )
194
+ self.norm_first = norm_first
195
+ self.dropout1 = nn.Dropout(dropout)
196
+ self.dropout2 = nn.Dropout(dropout)
197
+ if isinstance(activation, str):
198
+ activation = _get_activation_fn(activation)
199
+ elif isinstance(activation, partial):
200
+ activation = activation(d_model)
201
+ elif activation == BalancedDoubleSwish:
202
+ activation = BalancedDoubleSwish(d_model)
203
+ self.activation = activation
204
+
205
+ norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
206
+ if layer_norm_cls == IdentityNorm:
207
+ norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
208
+ else:
209
+ norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
210
+
211
+ if adaptive_layer_norm:
212
+ self.norm1 = AdaptiveLayerNorm(d_model, norm1)
213
+ self.norm2 = AdaptiveLayerNorm(d_model, norm2)
214
+ else:
215
+ self.norm1 = norm1
216
+ self.norm2 = norm2
217
+
218
+ def __setstate__(self, state):
219
+ super(TransformerEncoderLayer, self).__setstate__(state)
220
+ if not hasattr(self, "activation"):
221
+ self.activation = F.relu
222
+
223
+ def forward(
224
+ self,
225
+ src: Tensor,
226
+ src_mask: Optional[Tensor] = None,
227
+ src_key_padding_mask: Optional[Tensor] = None,
228
+ cache=None,
229
+ ) -> Tensor:
230
+ x = src
231
+ stage_embedding = None
232
+ x = self.norm1(
233
+ x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
234
+ stage_embedding,
235
+ )
236
+ x = self.norm2(x + self._ff_block(x), stage_embedding)
237
+
238
+ return x
239
+
240
+ def _sa_block(
241
+ self,
242
+ x: Tensor,
243
+ attn_mask: Optional[Tensor],
244
+ key_padding_mask: Optional[Tensor],
245
+ cache=None,
246
+ ) -> Tensor:
247
+ x = self.self_attn(
248
+ x,
249
+ x,
250
+ x,
251
+ attn_mask=attn_mask,
252
+ key_padding_mask=key_padding_mask,
253
+ need_weights=False,
254
+ cache=cache,
255
+ )
256
+ return self.dropout1(x)
257
+
258
+ def _ff_block(self, x: Tensor) -> Tensor:
259
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
260
+ return self.dropout2(x)
261
+
262
+
263
+ class AdaptiveLayerNorm(nn.Module):
264
+ r"""Adaptive Layer Normalization"""
265
+
266
+ def __init__(self, d_model, norm) -> None:
267
+ super(AdaptiveLayerNorm, self).__init__()
268
+ self.project_layer = nn.Linear(d_model, 2 * d_model)
269
+ self.norm = norm
270
+ self.d_model = d_model
271
+ self.eps = self.norm.eps
272
+
273
+ def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
274
+ if isinstance(input, tuple):
275
+ input, embedding = input
276
+ weight, bias = torch.split(
277
+ self.project_layer(embedding),
278
+ split_size_or_sections=self.d_model,
279
+ dim=-1,
280
+ )
281
+ return (weight * self.norm(input) + bias, embedding)
282
+
283
+ weight, bias = torch.split(
284
+ self.project_layer(embedding),
285
+ split_size_or_sections=self.d_model,
286
+ dim=-1,
287
+ )
288
+ return weight * self.norm(input) + bias
289
+
290
+
291
+ def _get_clones(module, N):
292
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
GPT_SoVITS/AR/text_processing/__init__.py ADDED
File without changes
GPT_SoVITS/AR/text_processing/phonemizer.py ADDED
@@ -0,0 +1,79 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import itertools
4
+ import re
5
+ from typing import Dict
6
+ from typing import List
7
+
8
+ import regex
9
+ from gruut import sentences
10
+ from gruut.const import Sentence
11
+ from gruut.const import Word
12
+ from AR.text_processing.symbols import SYMBOL_TO_ID
13
+
14
+
15
+ class GruutPhonemizer:
16
+ def __init__(self, language: str):
17
+ self._phonemizer = sentences
18
+ self.lang = language
19
+ self.symbol_to_id = SYMBOL_TO_ID
20
+ self._special_cases_dict: Dict[str] = {
21
+ r"\.\.\.": "... ",
22
+ ";": "; ",
23
+ ":": ": ",
24
+ ",": ", ",
25
+ r"\.": ". ",
26
+ "!": "! ",
27
+ r"\?": "? ",
28
+ "—": "—",
29
+ "…": "… ",
30
+ "«": "«",
31
+ "»": "»",
32
+ }
33
+ self._punctuation_regexp: str = (
34
+ rf"([{''.join(self._special_cases_dict.keys())}])"
35
+ )
36
+
37
+ def _normalize_punctuation(self, text: str) -> str:
38
+ text = regex.sub(rf"\pZ+{self._punctuation_regexp}", r"\1", text)
39
+ text = regex.sub(rf"{self._punctuation_regexp}(\pL)", r"\1 \2", text)
40
+ text = regex.sub(r"\pZ+", r" ", text)
41
+ return text.strip()
42
+
43
+ def _convert_punctuation(self, word: Word) -> str:
44
+ if not word.phonemes:
45
+ return ""
46
+ if word.phonemes[0] in ["‖", "|"]:
47
+ return word.text.strip()
48
+
49
+ phonemes = "".join(word.phonemes)
50
+ # remove modifier characters ˈˌː with regex
51
+ phonemes = re.sub(r"[ˈˌː͡]", "", phonemes)
52
+ return phonemes.strip()
53
+
54
+ def phonemize(self, text: str, espeak: bool = False) -> str:
55
+ text_to_phonemize: str = self._normalize_punctuation(text)
56
+ sents: List[Sentence] = [
57
+ sent
58
+ for sent in self._phonemizer(text_to_phonemize, lang="en-us", espeak=espeak)
59
+ ]
60
+ words: List[str] = [
61
+ self._convert_punctuation(word) for word in itertools.chain(*sents)
62
+ ]
63
+ return " ".join(words)
64
+
65
+ def transform(self, phonemes):
66
+ # convert phonemes to ids
67
+ # dictionary is in symbols.py
68
+ return [self.symbol_to_id[p] for p in phonemes if p in self.symbol_to_id.keys()]
69
+
70
+
71
+ if __name__ == "__main__":
72
+ phonemizer = GruutPhonemizer("en-us")
73
+ # text -> IPA
74
+ phonemes = phonemizer.phonemize("Hello, wor-ld ?")
75
+ print("phonemes:", phonemes)
76
+ print("len(phonemes):", len(phonemes))
77
+ phoneme_ids = phonemizer.transform(phonemes)
78
+ print("phoneme_ids:", phoneme_ids)
79
+ print("len(phoneme_ids):", len(phoneme_ids))
GPT_SoVITS/AR/text_processing/symbols.py ADDED
@@ -0,0 +1,10 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ PAD = "_"
4
+ PUNCTUATION = ';:,.!?¡¿—…"«»“” '
5
+ LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
6
+ IPA_LETTERS = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
7
+ SYMBOLS = [PAD] + list(PUNCTUATION) + list(LETTERS) + list(IPA_LETTERS)
8
+ SPACE_ID = SYMBOLS.index(" ")
9
+ SYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)}
10
+ ID_TO_SYMBOL = {i: s for i, s in enumerate(SYMBOLS)}
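A small sketch of how the tables above map an IPA phoneme string to ids and back; the example string is illustrative, and symbols not present in the table are simply skipped:

```python
# Minimal sketch: phoneme string <-> id round trip using the tables defined above.
from AR.text_processing.symbols import SYMBOL_TO_ID, ID_TO_SYMBOL

phonemes = "h ə l oʊ"                                    # illustrative IPA-ish string
ids = [SYMBOL_TO_ID[p] for p in phonemes if p in SYMBOL_TO_ID]
restored = "".join(ID_TO_SYMBOL[i] for i in ids)
print(ids)
print(restored)                                          # -> "h ə l oʊ"
```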
GPT_SoVITS/AR/utils/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ import re
2
+
3
+
4
+ def str2bool(str):
5
+ return True if str.lower() == 'true' else False
6
+
7
+
8
+ def get_newest_ckpt(string_list):
9
+ # Define a regex pattern that matches the numbers embedded in each checkpoint filename
10
+ pattern = r'epoch=(\d+)-step=(\d+)\.ckpt'
11
+
12
+ # Extract the numbers from every filename with the regex and collect (epoch, step, name) tuples
13
+ extracted_info = []
14
+ for string in string_list:
15
+ match = re.match(pattern, string)
16
+ if match:
17
+ epoch = int(match.group(1))
18
+ step = int(match.group(2))
19
+ extracted_info.append((epoch, step, string))
20
+ # Sort by the number after `epoch` and then by the number after `step`, newest first
21
+ sorted_info = sorted(
22
+ extracted_info, key=lambda x: (x[0], x[1]), reverse=True)
23
+ # Take the newest checkpoint filename
24
+ newest_ckpt = sorted_info[0][2]
25
+ return newest_ckpt
26
+
27
+
28
+ # Return the first line of text if the file exists and is non-empty; otherwise return False
29
+ def check_txt_file(file_path):
30
+ try:
31
+ with open(file_path, 'r') as file:
32
+ text = file.readline().strip()
33
+ assert text.strip() != ''
34
+ return text
35
+ except Exception:
36
+ return False
37
+ return False
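A quick sketch of the checkpoint-selection helper above; the filenames are illustrative:

```python
# Minimal sketch: get_newest_ckpt picks the (epoch, step)-largest Lightning checkpoint name.
from AR.utils import get_newest_ckpt, str2bool

names = ["epoch=4-step=1000.ckpt", "epoch=12-step=48000.ckpt", "epoch=12-step=52000.ckpt"]
print(get_newest_ckpt(names))   # -> "epoch=12-step=52000.ckpt"
print(str2bool("True"))         # -> True (case-insensitive)
```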
GPT_SoVITS/AR/utils/initialize.py ADDED
@@ -0,0 +1,38 @@
1
+ #!/usr/bin/env python3
2
+ """Initialize modules for espnet2 neural networks."""
3
+ import torch
4
+ from typeguard import check_argument_types
5
+
6
+
7
+ def initialize(model: torch.nn.Module, init: str):
8
+ """Initialize weights of a neural network module.
9
+
10
+ Parameters are initialized using the given method or distribution.
11
+
12
+ Custom initialization routines can be implemented into submodules
13
+ as function `espnet_initialization_fn` within the custom module.
14
+
15
+ Args:
16
+ model: Target.
17
+ init: Method of initialization.
18
+ """
19
+ assert check_argument_types()
20
+ print("init with", init)
21
+
22
+ # weight init
23
+ for p in model.parameters():
24
+ if p.dim() > 1:
25
+ if init == "xavier_uniform":
26
+ torch.nn.init.xavier_uniform_(p.data)
27
+ elif init == "xavier_normal":
28
+ torch.nn.init.xavier_normal_(p.data)
29
+ elif init == "kaiming_uniform":
30
+ torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
31
+ elif init == "kaiming_normal":
32
+ torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
33
+ else:
34
+ raise ValueError("Unknown initialization: " + init)
35
+ # bias init
36
+ for name, p in model.named_parameters():
37
+ if ".bias" in name and p.dim() == 1:
38
+ p.data.zero_()
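A minimal sketch of applying the initializer above to a toy model; it assumes a typeguard version that still provides `check_argument_types`, as the import in this file requires:

```python
# Minimal sketch: re-initializing a toy model with the helper above.
import torch
from AR.utils.initialize import initialize

model = torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 4),
)
initialize(model, "xavier_uniform")   # >1-D weights re-initialized, all *.bias parameters zeroed
```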
GPT_SoVITS/AR/utils/io.py ADDED
@@ -0,0 +1,34 @@
1
+ import sys
2
+
3
+ import torch
4
+ import yaml
5
+
6
+
7
+ def load_yaml_config(path):
8
+ with open(path) as f:
9
+ config = yaml.full_load(f)
10
+ return config
11
+
12
+
13
+ def save_config_to_yaml(config, path):
14
+ assert path.endswith(".yaml")
15
+ with open(path, "w") as f:
16
+ f.write(yaml.dump(config))
17
+ f.close()
18
+
19
+
20
+ def write_args(args, path):
21
+ args_dict = dict(
22
+ (name, getattr(args, name)) for name in dir(args) if not name.startswith("_")
23
+ )
24
+ with open(path, "a") as args_file:
25
+ args_file.write("==> torch version: {}\n".format(torch.__version__))
26
+ args_file.write(
27
+ "==> cudnn version: {}\n".format(torch.backends.cudnn.version())
28
+ )
29
+ args_file.write("==> Cmd:\n")
30
+ args_file.write(str(sys.argv))
31
+ args_file.write("\n==> args:\n")
32
+ for k, v in sorted(args_dict.items()):
33
+ args_file.write(" %s: %s\n" % (str(k), str(v)))
34
+ args_file.close()
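A small sketch of round-tripping a config dict with the YAML helpers above; the file name and keys are illustrative:

```python
# Minimal sketch: save a config dict to YAML and load it back with the helpers above.
from AR.utils.io import load_yaml_config, save_config_to_yaml

cfg = {"train": {"batch_size": 8, "epochs": 15}, "model": {"hidden_dim": 512}}
save_config_to_yaml(cfg, "s1_config.yaml")
print(load_yaml_config("s1_config.yaml")["train"]["batch_size"])   # -> 8
```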
GPT_SoVITS/BigVGAN/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 NVIDIA CORPORATION.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/README.md ADDED
@@ -0,0 +1,266 @@
1
+ ## BigVGAN: A Universal Neural Vocoder with Large-Scale Training
2
+
3
+ #### Sang-gil Lee, Wei Ping, Boris Ginsburg, Bryan Catanzaro, Sungroh Yoon
4
+
5
+ [[Paper]](https://arxiv.org/abs/2206.04658) - [[Code]](https://github.com/NVIDIA/BigVGAN) - [[Showcase]](https://bigvgan-demo.github.io/) - [[Project Page]](https://research.nvidia.com/labs/adlr/projects/bigvgan/) - [[Weights]](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a) - [[Demo]](https://huggingface.co/spaces/nvidia/BigVGAN)
6
+
7
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/bigvgan-a-universal-neural-vocoder-with-large/speech-synthesis-on-libritts)](https://paperswithcode.com/sota/speech-synthesis-on-libritts?p=bigvgan-a-universal-neural-vocoder-with-large)
8
+
9
+ <center><img src="https://user-images.githubusercontent.com/15963413/218609148-881e39df-33af-4af9-ab95-1427c4ebf062.png" width="800"></center>
10
+
11
+ ## News
12
+ - **Sep 2024 (v2.4):**
13
+ - We have updated the pretrained checkpoints trained for 5M steps. This is the final release of the BigVGAN-v2 checkpoints.
14
+
15
+ - **Jul 2024 (v2.3):**
16
+ - General refactor and code improvements for improved readability.
17
+ - Fully fused CUDA kernel of anti-aliased activation (upsampling + activation + downsampling) with an inference speed benchmark.
18
+
19
+ - **Jul 2024 (v2.2):** The repository now includes an interactive local demo using gradio.
20
+
21
+ - **Jul 2024 (v2.1):** BigVGAN is now integrated with 🤗 Hugging Face Hub with easy access to inference using pretrained checkpoints. We also provide an interactive demo on Hugging Face Spaces.
22
+
23
+ - **Jul 2024 (v2):** We release BigVGAN-v2 along with pretrained checkpoints. Below are the highlights:
24
+ - Custom CUDA kernel for inference: we provide a fused upsampling + activation kernel written in CUDA for accelerated inference speed. Our test shows 1.5 - 3x faster speed on a single A100 GPU.
25
+ - Improved discriminator and loss: BigVGAN-v2 is trained using a [multi-scale sub-band CQT discriminator](https://arxiv.org/abs/2311.14957) and a [multi-scale mel spectrogram loss](https://arxiv.org/abs/2306.06546).
26
+ - Larger training data: BigVGAN-v2 is trained using datasets containing diverse audio types, including speech in multiple languages, environmental sounds, and instruments.
27
+ - We provide pretrained checkpoints of BigVGAN-v2 using diverse audio configurations, supporting up to 44 kHz sampling rate and 512x upsampling ratio.
28
+
29
+ ## Installation
30
+
31
+ The codebase has been tested on Python `3.10` and PyTorch `2.3.1` conda packages with either `pytorch-cuda=12.1` or `pytorch-cuda=11.8`. Below is an example command to create the conda environment:
32
+
33
+ ```shell
34
+ conda create -n bigvgan python=3.10 pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
35
+ conda activate bigvgan
36
+ ```
37
+
38
+ Clone the repository and install dependencies:
39
+
40
+ ```shell
41
+ git clone https://github.com/NVIDIA/BigVGAN
42
+ cd BigVGAN
43
+ pip install -r requirements.txt
44
+ ```
45
+
46
+ ## Inference Quickstart using 🤗 Hugging Face Hub
47
+
48
+ The example below describes how to use BigVGAN: load the pretrained BigVGAN generator from Hugging Face Hub, compute the mel spectrogram from an input waveform, and generate a synthesized waveform using the mel spectrogram as the model's input.
49
+
50
+ ```python
51
+ device = 'cuda'
52
+
53
+ import torch
54
+ import bigvgan
55
+ import librosa
56
+ from meldataset import get_mel_spectrogram
57
+
58
+ # instantiate the model. You can optionally set use_cuda_kernel=True for faster inference.
59
+ model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x', use_cuda_kernel=False)
60
+
61
+ # remove weight norm in the model and set to eval mode
62
+ model.remove_weight_norm()
63
+ model = model.eval().to(device)
64
+
65
+ # load wav file and compute mel spectrogram
66
+ wav_path = '/path/to/your/audio.wav'
67
+ wav, sr = librosa.load(wav_path, sr=model.h.sampling_rate, mono=True) # wav is np.ndarray with shape [T_time] and values in [-1, 1]
68
+ wav = torch.FloatTensor(wav).unsqueeze(0) # wav is FloatTensor with shape [B(1), T_time]
69
+
70
+ # compute mel spectrogram from the ground truth audio
71
+ mel = get_mel_spectrogram(wav, model.h).to(device) # mel is FloatTensor with shape [B(1), C_mel, T_frame]
72
+
73
+ # generate waveform from mel
74
+ with torch.inference_mode():
75
+ wav_gen = model(mel) # wav_gen is FloatTensor with shape [B(1), 1, T_time] and values in [-1, 1]
76
+ wav_gen_float = wav_gen.squeeze(0).cpu() # wav_gen is FloatTensor with shape [1, T_time]
77
+
78
+ # you can convert the generated waveform to 16 bit linear PCM
79
+ wav_gen_int16 = (wav_gen_float * 32767.0).numpy().astype('int16') # wav_gen is now np.ndarray with shape [1, T_time] and int16 dtype
80
+ ```
81
+
82
+ ## Local gradio demo <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a>
83
+
84
+ You can run a local gradio demo using the command below:
85
+
86
+ ```python
87
+ pip install -r demo/requirements.txt
88
+ python demo/app.py
89
+ ```
90
+
91
+ ## Training
92
+
93
+ Create symbolic links to the root of the dataset. The codebase uses filelists with paths relative to the dataset root. Below are example commands for the LibriTTS dataset:
94
+
95
+ ```shell
96
+ cd filelists/LibriTTS && \
97
+ ln -s /path/to/your/LibriTTS/train-clean-100 train-clean-100 && \
98
+ ln -s /path/to/your/LibriTTS/train-clean-360 train-clean-360 && \
99
+ ln -s /path/to/your/LibriTTS/train-other-500 train-other-500 && \
100
+ ln -s /path/to/your/LibriTTS/dev-clean dev-clean && \
101
+ ln -s /path/to/your/LibriTTS/dev-other dev-other && \
102
+ ln -s /path/to/your/LibriTTS/test-clean test-clean && \
103
+ ln -s /path/to/your/LibriTTS/test-other test-other && \
104
+ cd ../..
105
+ ```
106
+
107
+ Train the BigVGAN model. Below is an example command for training BigVGAN-v2 on the LibriTTS dataset at 24 kHz with a full 100-band mel spectrogram as input:
108
+
109
+ ```shell
110
+ python train.py \
111
+ --config configs/bigvgan_v2_24khz_100band_256x.json \
112
+ --input_wavs_dir filelists/LibriTTS \
113
+ --input_training_file filelists/LibriTTS/train-full.txt \
114
+ --input_validation_file filelists/LibriTTS/val-full.txt \
115
+ --list_input_unseen_wavs_dir filelists/LibriTTS filelists/LibriTTS \
116
+ --list_input_unseen_validation_file filelists/LibriTTS/dev-clean.txt filelists/LibriTTS/dev-other.txt \
117
+ --checkpoint_path exp/bigvgan_v2_24khz_100band_256x
118
+ ```
119
+
120
+ ## Synthesis
121
+
122
+ Synthesize from the trained BigVGAN model. Below is an example command for generating audio from the model.
123
+ It computes mel spectrograms using wav files from `--input_wavs_dir` and saves the generated audio to `--output_dir`.
124
+
125
+ ```shell
126
+ python inference.py \
127
+ --checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
128
+ --input_wavs_dir /path/to/your/input_wav \
129
+ --output_dir /path/to/your/output_wav
130
+ ```
131
+
132
+ `inference_e2e.py` supports synthesis directly from the mel spectrogram saved in `.npy` format, with shapes `[1, channel, frame]` or `[channel, frame]`.
133
+ It loads mel spectrograms from `--input_mels_dir` and saves the generated audio to `--output_dir`.
134
+
135
+ Make sure that the STFT hyperparameters used to compute the mel spectrogram match the model's, as defined in the `config.json` of the corresponding model.
136
+
137
+ ```shell
138
+ python inference_e2e.py \
139
+ --checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
140
+ --input_mels_dir /path/to/your/input_mel \
141
+ --output_dir /path/to/your/output_wav
142
+ ```
143
+
144
+ ## Using Custom CUDA Kernel for Synthesis
145
+
146
+ You can apply the fast CUDA inference kernel by using a parameter `use_cuda_kernel` when instantiating BigVGAN:
147
+
148
+ ```python
149
+ generator = BigVGAN(h, use_cuda_kernel=True)
150
+ ```
151
+
152
+ You can also pass `--use_cuda_kernel` to `inference.py` and `inference_e2e.py` to enable this feature.
153
+
154
+ When applied for the first time, it builds the kernel using `nvcc` and `ninja`. If the build succeeds, the kernel is saved to `alias_free_activation/cuda/build` and the model automatically loads the kernel. The codebase has been tested using CUDA `12.1`.
155
+
156
+ Please make sure that both are installed on your system and that the `nvcc` version matches the one your PyTorch build was compiled against.
157
+
158
+ We recommend running `test_cuda_vs_torch_model.py` first to build the CUDA kernel and check its correctness. See the example command below and its output, which ends with `[Success] test CUDA fused vs. plain torch BigVGAN inference`:
159
+
160
+ ```shell
161
+ python tests/test_cuda_vs_torch_model.py \
162
+ --checkpoint_file /path/to/your/bigvgan_generator.pt
163
+ ```
164
+
165
+ ```shell
166
+ loading plain Pytorch BigVGAN
167
+ ...
168
+ loading CUDA kernel BigVGAN with auto-build
169
+ Detected CUDA files, patching ldflags
170
+ Emitting ninja build file /path/to/your/BigVGAN/alias_free_activation/cuda/build/build.ninja..
171
+ Building extension module anti_alias_activation_cuda...
172
+ ...
173
+ Loading extension module anti_alias_activation_cuda...
174
+ ...
175
+ Loading '/path/to/your/bigvgan_generator.pt'
176
+ ...
177
+ [Success] test CUDA fused vs. plain torch BigVGAN inference
178
+ > mean_difference=0.0007238413265440613
179
+ ...
180
+ ```
181
+
182
+ If you see `[Fail] test CUDA fused vs. plain torch BigVGAN inference`, it means that the CUDA kernel produces incorrect output. Please check whether the `nvcc` installed on your system is compatible with your PyTorch version.
183
+
184
+ ## Pretrained Models
185
+
186
+ We provide the [pretrained models on Hugging Face Collections](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a).
187
+ You can download the generator weights (named `bigvgan_generator.pt`) and the discriminator/optimizer states (named `bigvgan_discriminator_optimizer.pt`) from the listed model repositories.
188
+
189
+ | Model Name | Sampling Rate | Mel band | fmax | Upsampling Ratio | Params | Dataset | Steps | Fine-Tuned |
190
+ |:--------------------------------------------------------------------------------------------------------:|:-------------:|:--------:|:-----:|:----------------:|:------:|:--------------------------:|:-----:|:----------:|
191
+ | [bigvgan_v2_44khz_128band_512x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_512x) | 44 kHz | 128 | 22050 | 512 | 122M | Large-scale Compilation | 5M | No |
192
+ | [bigvgan_v2_44khz_128band_256x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_256x) | 44 kHz | 128 | 22050 | 256 | 112M | Large-scale Compilation | 5M | No |
193
+ | [bigvgan_v2_24khz_100band_256x](https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x) | 24 kHz | 100 | 12000 | 256 | 112M | Large-scale Compilation | 5M | No |
194
+ | [bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_256x) | 22 kHz | 80 | 11025 | 256 | 112M | Large-scale Compilation | 5M | No |
195
+ | [bigvgan_v2_22khz_80band_fmax8k_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_fmax8k_256x) | 22 kHz | 80 | 8000 | 256 | 112M | Large-scale Compilation | 5M | No |
196
+ | [bigvgan_24khz_100band](https://huggingface.co/nvidia/bigvgan_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 112M | LibriTTS | 5M | No |
197
+ | [bigvgan_base_24khz_100band](https://huggingface.co/nvidia/bigvgan_base_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 14M | LibriTTS | 5M | No |
198
+ | [bigvgan_22khz_80band](https://huggingface.co/nvidia/bigvgan_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 112M | LibriTTS + VCTK + LJSpeech | 5M | No |
199
+ | [bigvgan_base_22khz_80band](https://huggingface.co/nvidia/bigvgan_base_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 14M | LibriTTS + VCTK + LJSpeech | 5M | No |
200
+
201
+ The paper results are based on the original 24kHz BigVGAN models (`bigvgan_24khz_100band` and `bigvgan_base_24khz_100band`) trained on LibriTTS dataset.
202
+ We also provide 22kHz BigVGAN models with band-limited setup (i.e., fmax=8000) for TTS applications.
203
+ Note that the checkpoints use `snakebeta` activation with log scale parameterization, which gives the best overall quality.
204
+
205
+ You can fine-tune the models by:
206
+
207
+ 1. downloading the checkpoints (both the generator weight and its discriminator/optimizer states)
208
+ 2. resuming training using your audio dataset by specifying `--checkpoint_path` that includes the checkpoints when launching `train.py`
209
+
210
+ ## Training Details of BigVGAN-v2
211
+
212
+ Compared to the original BigVGAN, the pretrained BigVGAN-v2 checkpoints were trained with `batch_size=32`, a longer `segment_size=65536`, and 8 A100 GPUs.
213
+
214
+ Note that the BigVGAN-v2 `json` config files in `./configs` use `batch_size=4` by default so that training fits on a single A100 GPU. You can adjust `batch_size` to match your GPUs when fine-tuning the models.
215
+
216
+ When training BigVGAN-v2 from scratch with a small batch size, it can encounter the early divergence problem mentioned in the paper. In such a case, we recommend lowering the `clip_grad_norm` value (e.g. `100`) for the early training iterations (e.g. the first 20000 steps) and then raising it back to the default `500`.
217
+
218
+ ## Evaluation Results of BigVGAN-v2
219
+
220
+ Below are the objective results of the 24kHz model (`bigvgan_v2_24khz_100band_256x`) obtained from the LibriTTS `dev` sets. BigVGAN-v2 shows noticeable improvements in the metrics. The model also exhibits reduced perceptual artifacts, especially for non-speech audio.
221
+
222
+ | Model | Dataset | Steps | PESQ(↑) | M-STFT(↓) | MCD(↓) | Periodicity(↓) | V/UV F1(↑) |
223
+ |:----------:|:-----------------------:|:-----:|:---------:|:----------:|:----------:|:--------------:|:----------:|
224
+ | BigVGAN | LibriTTS | 1M | 4.027 | 0.7997 | 0.3745 | 0.1018 | 0.9598 |
225
+ | BigVGAN | LibriTTS | 5M | 4.256 | 0.7409 | 0.2988 | 0.0809 | 0.9698 |
226
+ | BigVGAN-v2 | Large-scale Compilation | 3M | 4.359 | 0.7134 | 0.3060 | 0.0621 | 0.9777 |
227
+ | BigVGAN-v2 | Large-scale Compilation | 5M | **4.362** | **0.7026** | **0.2903** | **0.0593** | **0.9793** |
228
+
229
+ ## Speed Benchmark
230
+
231
+ Below are the speed and VRAM usage benchmark results of BigVGAN from `tests/test_cuda_vs_torch_model.py`, using `bigvgan_v2_24khz_100band_256x` as a reference model.
232
+
233
+ | GPU | num_mel_frame | use_cuda_kernel | Speed (kHz) | Real-time Factor | VRAM (GB) |
234
+ |:--------------------------:|:-------------:|:---------------:|:-----------:|:----------------:|:---------:|
235
+ | NVIDIA A100 | 256 | False | 1672.1 | 69.7x | 1.3 |
236
+ | | | True | 3916.5 | 163.2x | 1.3 |
237
+ | | 2048 | False | 1899.6 | 79.2x | 1.7 |
238
+ | | | True | 5330.1 | 222.1x | 1.7 |
239
+ | | 16384 | False | 1973.8 | 82.2x | 5.0 |
240
+ | | | True | 5761.7 | 240.1x | 4.4 |
241
+ | NVIDIA GeForce RTX 3080 | 256 | False | 841.1 | 35.0x | 1.3 |
242
+ | | | True | 1598.1 | 66.6x | 1.3 |
243
+ | | 2048 | False | 929.9 | 38.7x | 1.7 |
244
+ | | | True | 1971.3 | 82.1x | 1.6 |
245
+ | | 16384 | False | 943.4 | 39.3x | 5.0 |
246
+ | | | True | 2026.5 | 84.4x | 3.9 |
247
+ | NVIDIA GeForce RTX 2080 Ti | 256 | False | 515.6 | 21.5x | 1.3 |
248
+ | | | True | 811.3 | 33.8x | 1.3 |
249
+ | | 2048 | False | 576.5 | 24.0x | 1.7 |
250
+ | | | True | 1023.0 | 42.6x | 1.5 |
251
+ | | 16384 | False | 589.4 | 24.6x | 5.0 |
252
+ | | | True | 1068.1 | 44.5x | 3.2 |
253
+
254
+ ## Acknowledgements
255
+
256
+ We thank Vijay Anand Korthikanti and Kevin J. Shih for their generous support in implementing the CUDA kernel for inference.
257
+
258
+ ## References
259
+
260
+ - [HiFi-GAN](https://github.com/jik876/hifi-gan) (for generator and multi-period discriminator)
261
+ - [Snake](https://github.com/EdwardDixon/snake) (for periodic activation)
262
+ - [Alias-free-torch](https://github.com/junjun3518/alias-free-torch) (for anti-aliasing)
263
+ - [Julius](https://github.com/adefossez/julius) (for low-pass filter)
264
+ - [UnivNet](https://github.com/mindslab-ai/univnet) (for multi-resolution discriminator)
265
+ - [descript-audio-codec](https://github.com/descriptinc/descript-audio-codec) and [vocos](https://github.com/gemelo-ai/vocos) (for multi-band multi-scale STFT discriminator and multi-scale mel spectrogram loss)
266
+ - [Amphion](https://github.com/open-mmlab/Amphion) (for multi-scale sub-band CQT discriminator)
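As a small follow-on to the Inference Quickstart in the README above, a sketch of writing the 16-bit output to disk; `scipy` is an assumption here and is not listed in the README's requirements:

```python
# Follow-on sketch to the quickstart above: write the int16 waveform to a .wav file.
# Assumes the `wav_gen_int16` array and `model` from the quickstart, plus scipy as an extra dependency.
from scipy.io import wavfile

wavfile.write("generated.wav", model.h.sampling_rate, wav_gen_int16.T)  # (T_time, 1) int16 samples
```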
GPT_SoVITS/BigVGAN/activations.py ADDED
@@ -0,0 +1,126 @@
1
+ # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import torch
5
+ from torch import nn, sin, pow
6
+ from torch.nn import Parameter
7
+
8
+
9
+ class Snake(nn.Module):
10
+ """
11
+ Implementation of a sine-based periodic activation function
12
+ Shape:
13
+ - Input: (B, C, T)
14
+ - Output: (B, C, T), same shape as the input
15
+ Parameters:
16
+ - alpha - trainable parameter
17
+ References:
18
+ - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
19
+ https://arxiv.org/abs/2006.08195
20
+ Examples:
21
+ >>> a1 = snake(256)
22
+ >>> x = torch.randn(256)
23
+ >>> x = a1(x)
24
+ """
25
+
26
+ def __init__(
27
+ self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
28
+ ):
29
+ """
30
+ Initialization.
31
+ INPUT:
32
+ - in_features: shape of the input
33
+ - alpha: trainable parameter
34
+ alpha is initialized to 1 by default, higher values = higher-frequency.
35
+ alpha will be trained along with the rest of your model.
36
+ """
37
+ super(Snake, self).__init__()
38
+ self.in_features = in_features
39
+
40
+ # Initialize alpha
41
+ self.alpha_logscale = alpha_logscale
42
+ if self.alpha_logscale: # Log scale alphas initialized to zeros
43
+ self.alpha = Parameter(torch.zeros(in_features) * alpha)
44
+ else: # Linear scale alphas initialized to ones
45
+ self.alpha = Parameter(torch.ones(in_features) * alpha)
46
+
47
+ self.alpha.requires_grad = alpha_trainable
48
+
49
+ self.no_div_by_zero = 0.000000001
50
+
51
+ def forward(self, x):
52
+ """
53
+ Forward pass of the function.
54
+ Applies the function to the input elementwise.
55
+ Snake ∶= x + 1/a * sin^2 (xa)
56
+ """
57
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
58
+ if self.alpha_logscale:
59
+ alpha = torch.exp(alpha)
60
+ x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
61
+
62
+ return x
63
+
64
+
65
+ class SnakeBeta(nn.Module):
66
+ """
67
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
68
+ Shape:
69
+ - Input: (B, C, T)
70
+ - Output: (B, C, T), same shape as the input
71
+ Parameters:
72
+ - alpha - trainable parameter that controls frequency
73
+ - beta - trainable parameter that controls magnitude
74
+ References:
75
+ - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
76
+ https://arxiv.org/abs/2006.08195
77
+ Examples:
78
+ >>> a1 = snakebeta(256)
79
+ >>> x = torch.randn(256)
80
+ >>> x = a1(x)
81
+ """
82
+
83
+ def __init__(
84
+ self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
85
+ ):
86
+ """
87
+ Initialization.
88
+ INPUT:
89
+ - in_features: shape of the input
90
+ - alpha - trainable parameter that controls frequency
91
+ - beta - trainable parameter that controls magnitude
92
+ alpha is initialized to 1 by default, higher values = higher-frequency.
93
+ beta is initialized to 1 by default, higher values = higher-magnitude.
94
+ alpha will be trained along with the rest of your model.
95
+ """
96
+ super(SnakeBeta, self).__init__()
97
+ self.in_features = in_features
98
+
99
+ # Initialize alpha
100
+ self.alpha_logscale = alpha_logscale
101
+ if self.alpha_logscale: # Log scale alphas initialized to zeros
102
+ self.alpha = Parameter(torch.zeros(in_features) * alpha)
103
+ self.beta = Parameter(torch.zeros(in_features) * alpha)
104
+ else: # Linear scale alphas initialized to ones
105
+ self.alpha = Parameter(torch.ones(in_features) * alpha)
106
+ self.beta = Parameter(torch.ones(in_features) * alpha)
107
+
108
+ self.alpha.requires_grad = alpha_trainable
109
+ self.beta.requires_grad = alpha_trainable
110
+
111
+ self.no_div_by_zero = 0.000000001
112
+
113
+ def forward(self, x):
114
+ """
115
+ Forward pass of the function.
116
+ Applies the function to the input elementwise.
117
+ SnakeBeta ∶= x + 1/b * sin^2 (xa)
118
+ """
119
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
120
+ beta = self.beta.unsqueeze(0).unsqueeze(-1)
121
+ if self.alpha_logscale:
122
+ alpha = torch.exp(alpha)
123
+ beta = torch.exp(beta)
124
+ x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
125
+
126
+ return x
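A minimal sketch of the two activations above applied to a (B, C, T) tensor; the channel count and shapes are illustrative, and the import assumes the script is run from the GPT_SoVITS/BigVGAN directory:

```python
# Minimal sketch: Snake and SnakeBeta keep the (B, C, T) input shape.
import torch
from activations import Snake, SnakeBeta   # assumes GPT_SoVITS/BigVGAN is the working directory

x = torch.randn(2, 80, 100)                # (batch, channels, time)
y1 = Snake(in_features=80, alpha_logscale=True)(x)
y2 = SnakeBeta(in_features=80, alpha_logscale=True)(x)
print(y1.shape, y2.shape)                  # both torch.Size([2, 80, 100])
```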
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py ADDED
File without changes
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from alias_free_activation.torch.resample import UpSample1d, DownSample1d
7
+
8
+ # load fused CUDA kernel: this enables importing anti_alias_activation_cuda
9
+ from alias_free_activation.cuda import load
10
+
11
+ anti_alias_activation_cuda = load.load()
12
+
13
+
14
+ class FusedAntiAliasActivation(torch.autograd.Function):
15
+ """
16
+ Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs.
17
+ The hyperparameters are hard-coded in the kernel to maximize speed.
18
+ NOTE: The fused kernel is incorrect for Activation1d with different hyperparameters.
19
+ """
20
+
21
+ @staticmethod
22
+ def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta):
23
+ activation_results = anti_alias_activation_cuda.forward(
24
+ inputs, up_ftr, down_ftr, alpha, beta
25
+ )
26
+
27
+ return activation_results
28
+
29
+ @staticmethod
30
+ def backward(ctx, output_grads):
31
+ raise NotImplementedError
32
+ return output_grads, None, None
33
+
34
+
35
+ class Activation1d(nn.Module):
36
+ def __init__(
37
+ self,
38
+ activation,
39
+ up_ratio: int = 2,
40
+ down_ratio: int = 2,
41
+ up_kernel_size: int = 12,
42
+ down_kernel_size: int = 12,
43
+ fused: bool = True,
44
+ ):
45
+ super().__init__()
46
+ self.up_ratio = up_ratio
47
+ self.down_ratio = down_ratio
48
+ self.act = activation
49
+ self.upsample = UpSample1d(up_ratio, up_kernel_size)
50
+ self.downsample = DownSample1d(down_ratio, down_kernel_size)
51
+
52
+ self.fused = fused # Whether to use fused CUDA kernel or not
53
+
54
+ def forward(self, x):
55
+ if not self.fused:
56
+ x = self.upsample(x)
57
+ x = self.act(x)
58
+ x = self.downsample(x)
59
+ return x
60
+ else:
61
+ if self.act.__class__.__name__ == "Snake":
62
+ beta = self.act.alpha.data # Snake uses same params for alpha and beta
63
+ else:
64
+ beta = (
65
+ self.act.beta.data
66
+ ) # Snakebeta uses different params for alpha and beta
67
+ alpha = self.act.alpha.data
68
+ if (
69
+ not self.act.alpha_logscale
70
+ ): # Exp baked into cuda kernel, cancel it out with a log
71
+ alpha = torch.log(alpha)
72
+ beta = torch.log(beta)
73
+
74
+ x = FusedAntiAliasActivation.apply(
75
+ x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta
76
+ )
77
+ return x
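For reference, a sketch of what the non-fused path of `Activation1d` above computes, using the same building blocks this module imports (`UpSample1d`/`DownSample1d` plus the `SnakeBeta` activation). Note that merely importing the module above triggers the CUDA kernel build via `load.load()`, so this sketch sticks to the plain torch pieces; shapes are illustrative:

```python
# Minimal sketch of the non-fused path: upsample -> activation -> downsample.
import torch
from alias_free_activation.torch.resample import UpSample1d, DownSample1d
from activations import SnakeBeta

act = SnakeBeta(in_features=80, alpha_logscale=True)
up = UpSample1d(2, 12)       # (ratio, kernel_size), as passed by Activation1d above
down = DownSample1d(2, 12)

x = torch.randn(1, 80, 256)  # (B, C, T)
y = down(act(up(x)))         # what Activation1d(act, fused=False)(x) computes
print(y.shape)               # torch.Size([1, 80, 256])
```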
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp ADDED
@@ -0,0 +1,23 @@
1
+ /* coding=utf-8
2
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <torch/extension.h>
18
+
19
+ extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta);
20
+
21
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
22
+ m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)");
23
+ }
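The extension above is normally built via `alias_free_activation/cuda/load.py`; as a hypothetical stand-alone sketch, it can also be JIT-compiled with `torch.utils.cpp_extension.load`, which exposes the `forward` binding declared above:

```python
# Hypothetical sketch: building the extension above by hand (the repo normally uses load.py).
from torch.utils.cpp_extension import load

anti_alias_activation_cuda = load(
    name="anti_alias_activation_cuda",
    sources=[
        "alias_free_activation/cuda/anti_alias_activation.cpp",
        "alias_free_activation/cuda/anti_alias_activation_cuda.cu",
    ],
    extra_cuda_cflags=["-O3"],
)
# exposes: anti_alias_activation_cuda.forward(input, up_filter, down_filter, alpha, beta)
```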
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu ADDED
@@ -0,0 +1,246 @@
1
+ /* coding=utf-8
2
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <ATen/ATen.h>
18
+ #include <cuda.h>
19
+ #include <cuda_runtime.h>
20
+ #include <cuda_fp16.h>
21
+ #include <cuda_profiler_api.h>
22
+ #include <ATen/cuda/CUDAContext.h>
23
+ #include <torch/extension.h>
24
+ #include "type_shim.h"
25
+ #include <assert.h>
26
+ #include <cfloat>
27
+ #include <limits>
28
+ #include <stdint.h>
29
+ #include <c10/macros/Macros.h>
30
+
31
+ namespace
32
+ {
33
+ // Hard-coded hyperparameters
34
+ // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
35
+ constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 1 : 4;
36
+ constexpr int BUFFER_SIZE = 32;
37
+ constexpr int FILTER_SIZE = 12;
38
+ constexpr int HALF_FILTER_SIZE = 6;
39
+ constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl
40
+ constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl
41
+ constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl
42
+
43
+ template <typename input_t, typename output_t, typename acc_t>
44
+ __global__ void anti_alias_activation_forward(
45
+ output_t *dst,
46
+ const input_t *src,
47
+ const input_t *up_ftr,
48
+ const input_t *down_ftr,
49
+ const input_t *alpha,
50
+ const input_t *beta,
51
+ int batch_size,
52
+ int channels,
53
+ int seq_len)
54
+ {
55
+ // Up and downsample filters
56
+ input_t up_filter[FILTER_SIZE];
57
+ input_t down_filter[FILTER_SIZE];
58
+
59
+ // Load data from global memory including extra indices reserved for replication paddings
60
+ input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0};
61
+ input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0};
62
+
63
+ // Output stores downsampled output before writing to dst
64
+ output_t output[BUFFER_SIZE];
65
+
66
+ // blockDim/threadIdx = (128, 1, 1)
67
+ // gridDim/blockIdx = (seq_blocks, channels, batches)
68
+ int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
69
+ int local_offset = threadIdx.x * BUFFER_SIZE;
70
+ int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset;
71
+
72
+ // intermediate have double the seq_len
73
+ int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2;
74
+ int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset;
75
+
76
+ // Get values needed for replication padding before moving pointer
77
+ const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
78
+ input_t seq_left_most_value = right_most_pntr[0];
79
+ input_t seq_right_most_value = right_most_pntr[seq_len - 1];
80
+
81
+ // Move src and dst pointers
82
+ src += block_offset + local_offset;
83
+ dst += block_offset + local_offset;
84
+
85
+ // Alpha and beta values for snake activations. Applies exp by default
86
+ alpha = alpha + blockIdx.y;
87
+ input_t alpha_val = expf(alpha[0]);
88
+ beta = beta + blockIdx.y;
89
+ input_t beta_val = expf(beta[0]);
90
+
91
+ #pragma unroll
92
+ for (int it = 0; it < FILTER_SIZE; it += 1)
93
+ {
94
+ up_filter[it] = up_ftr[it];
95
+ down_filter[it] = down_ftr[it];
96
+ }
97
+
98
+ // Apply replication padding for upsampling, matching torch impl
99
+ #pragma unroll
100
+ for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1)
101
+ {
102
+ int element_index = seq_offset + it; // index for element
103
+ if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD))
104
+ {
105
+ elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value;
106
+ }
107
+ if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD))
108
+ {
109
+ elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value;
110
+ }
111
+ if ((element_index >= 0) && (element_index < seq_len))
112
+ {
113
+ elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it];
114
+ }
115
+ }
116
+
117
+ // Apply upsampling strided convolution and write to intermediates. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampling conv later
118
+ #pragma unroll
119
+ for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1)
120
+ {
121
+ input_t acc = 0.0;
122
+ int element_index = intermediate_seq_offset + it; // index for intermediate
123
+ #pragma unroll
124
+ for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1)
125
+ {
126
+ if ((element_index + f_idx) >= 0)
127
+ {
128
+ acc += up_filter[f_idx] * elements[it + f_idx];
129
+ }
130
+ }
131
+ intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc;
132
+ }
133
+
134
+ // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampling conv later
135
+ double no_div_by_zero = 0.000000001;
136
+ #pragma unroll
137
+ for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1)
138
+ {
139
+ intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val);
140
+ }
141
+
142
+ // Apply replication padding before downsampling conv from intermediates
143
+ #pragma unroll
144
+ for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1)
145
+ {
146
+ intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT];
147
+ }
148
+ #pragma unroll
149
+ for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1)
150
+ {
151
+ intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1];
152
+ }
153
+
154
+ // Apply downsample strided convolution (assuming stride=2) from intermediates
155
+ #pragma unroll
156
+ for (int it = 0; it < BUFFER_SIZE; it += 1)
157
+ {
158
+ input_t acc = 0.0;
159
+ #pragma unroll
160
+ for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1)
161
+ {
162
+ // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation
163
+ acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT];
164
+ }
165
+ output[it] = acc;
166
+ }
167
+
168
+ // Write output to dst
169
+ #pragma unroll
170
+ for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG)
171
+ {
172
+ int element_index = seq_offset + it;
173
+ if (element_index < seq_len)
174
+ {
175
+ dst[it] = output[it];
176
+ }
177
+ }
178
+
179
+ }
180
+
181
+ template <typename input_t, typename output_t, typename acc_t>
182
+ void dispatch_anti_alias_activation_forward(
183
+ output_t *dst,
184
+ const input_t *src,
185
+ const input_t *up_ftr,
186
+ const input_t *down_ftr,
187
+ const input_t *alpha,
188
+ const input_t *beta,
189
+ int batch_size,
190
+ int channels,
191
+ int seq_len)
192
+ {
193
+ if (seq_len == 0)
194
+ {
195
+ return;
196
+ }
197
+ else
198
+ {
199
+ // Use 128 threads per block to maximize GPU utilization
200
+ constexpr int threads_per_block = 128;
201
+ constexpr int seq_len_per_block = 4096;
202
+ int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block;
203
+ dim3 blocks(blocks_per_seq_len, channels, batch_size);
204
+ dim3 threads(threads_per_block, 1, 1);
205
+
206
+ anti_alias_activation_forward<input_t, output_t, acc_t>
207
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len);
208
+ }
209
+ }
210
+ }
211
+
212
+ extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta)
213
+ {
214
+ // Input is a 3d tensor with dimensions [batches, channels, seq_len]
215
+ const int batches = input.size(0);
216
+ const int channels = input.size(1);
217
+ const int seq_len = input.size(2);
218
+
219
+ // Output
220
+ auto act_options = input.options().requires_grad(false);
221
+
222
+ torch::Tensor anti_alias_activation_results =
223
+ torch::empty({batches, channels, seq_len}, act_options);
224
+
225
+ void *input_ptr = static_cast<void *>(input.data_ptr());
226
+ void *up_filter_ptr = static_cast<void *>(up_filter.data_ptr());
227
+ void *down_filter_ptr = static_cast<void *>(down_filter.data_ptr());
228
+ void *alpha_ptr = static_cast<void *>(alpha.data_ptr());
229
+ void *beta_ptr = static_cast<void *>(beta.data_ptr());
230
+ void *anti_alias_activation_results_ptr = static_cast<void *>(anti_alias_activation_results.data_ptr());
231
+
232
+ DISPATCH_FLOAT_HALF_AND_BFLOAT(
233
+ input.scalar_type(),
234
+ "dispatch anti alias activation_forward",
235
+ dispatch_anti_alias_activation_forward<scalar_t, scalar_t, float>(
236
+ reinterpret_cast<scalar_t *>(anti_alias_activation_results_ptr),
237
+ reinterpret_cast<const scalar_t *>(input_ptr),
238
+ reinterpret_cast<const scalar_t *>(up_filter_ptr),
239
+ reinterpret_cast<const scalar_t *>(down_filter_ptr),
240
+ reinterpret_cast<const scalar_t *>(alpha_ptr),
241
+ reinterpret_cast<const scalar_t *>(beta_ptr),
242
+ batches,
243
+ channels,
244
+ seq_len););
245
+ return anti_alias_activation_results;
246
+ }
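
For reference, the nonlinearity the kernel above fuses between its upsampling and downsampling convolutions is the "snake" activation Snake(x) = x + (1 / (beta + eps)) * sin^2(alpha * x), with alpha and beta stored in log scale and exponentiated on load (alpha_val = expf(alpha[0])). Below is a minimal CPU-side sketch of that per-sample formula, added only to make the kernel's inner loop easier to read; snake_beta is a hypothetical helper and is not part of the uploaded files.

#include <cmath>

// Sketch of the per-sample snake activation the kernel applies in-place to the
// upsampled intermediates (hypothetical helper, not part of this upload).
inline float snake_beta(float x, float log_alpha, float log_beta) {
    const float eps = 1e-9f;                  // mirrors no_div_by_zero in the kernel
    const float alpha = std::exp(log_alpha);  // kernel: alpha_val = expf(alpha[0])
    const float beta  = std::exp(log_beta);   // kernel: beta_val  = expf(beta[0])
    const float s = std::sin(alpha * x);
    return x + (1.0f / (beta + eps)) * s * s;
}

On the launch side, dispatch_anti_alias_activation_forward sizes the grid so that each block of 128 threads covers 128 * BUFFER_SIZE = 4096 input samples (seq_len_per_block), with one grid dimension per channel and one per batch element.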
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/build/_ ADDED
@@ -0,0 +1 @@
 
 
1
+
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* coding=utf-8
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /* This code is copied from NVIDIA apex:
18
+ * https://github.com/NVIDIA/apex
19
+ * with minor changes. */
20
+
21
+ #ifndef TORCH_CHECK
22
+ #define TORCH_CHECK AT_CHECK
23
+ #endif
24
+
25
+ #ifdef VERSION_GE_1_3
26
+ #define DATA_PTR data_ptr
27
+ #else
28
+ #define DATA_PTR data
29
+ #endif
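
Taken together, the two macros in compat.h keep the binding code buildable across PyTorch releases: TORCH_CHECK falls back to the older AT_CHECK name when only that is available, and DATA_PTR expands to data_ptr when the build defines VERSION_GE_1_3, or to the legacy data accessor otherwise. A minimal usage sketch follows, assuming a hypothetical binding function and that the build system defines VERSION_GE_1_3 for current PyTorch; it is not part of the uploaded files.

#include <torch/extension.h>
#include "compat.h"

// Hypothetical helper (not part of this upload) showing the intended use of the
// compat macros inside a CUDA extension binding.
void validate_and_read(torch::Tensor const &t) {
    TORCH_CHECK(t.is_cuda(), "expected a CUDA tensor");
    const float *ptr = t.DATA_PTR<float>(); // expands to data_ptr<float>() on PyTorch >= 1.3
    (void)ptr; // the raw pointer would be handed to a kernel launch
}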