Commit 06c138d
kevinwang676 committed
Parent(s): 2c74f9d

Upload folder using huggingface_hub
Files changed:
- .gitignore +389 -0
- LICENSE +21 -0
- README.md +2 -13
- app.py +566 -0
- config.py +80 -0
- lib/infer_pack/attentions.py +417 -0
- lib/infer_pack/commons.py +166 -0
- lib/infer_pack/models.py +1142 -0
- lib/infer_pack/models_dml.py +1124 -0
- lib/infer_pack/models_onnx.py +819 -0
- lib/infer_pack/modules.py +522 -0
- lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +90 -0
- lib/infer_pack/modules/F0Predictor/F0Predictor.py +16 -0
- lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +86 -0
- lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +97 -0
- lib/infer_pack/modules/F0Predictor/__init__.py +1 -0
- lib/infer_pack/onnx_inference.py +145 -0
- lib/infer_pack/transforms.py +209 -0
- requirements.txt +19 -0
- rmvpe.py +432 -0
- uvr5/lib/lib_v5/layers_123821KB.py +118 -0
- uvr5/lib/lib_v5/model_param_init.py +69 -0
- uvr5/lib/lib_v5/modelparams/4band_v2.json +54 -0
- uvr5/lib/lib_v5/nets_61968KB.py +122 -0
- uvr5/lib/lib_v5/spec_utils.py +672 -0
- uvr5/lib/name_params.json +263 -0
- uvr5/lib/utils.py +121 -0
- uvr5/uvr_model/__init__.py +1 -0
- uvr5/vr.py +196 -0
- vc_infer_pipeline.py +424 -0
.gitignore
ADDED
@@ -0,0 +1,389 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore

# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates

# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs

# Mono auto generated files
mono_crash.*

# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Oo]ut/
[Ll]og/
[Ll]ogs/
infer_pack\__pycache__
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/

# Visual Studio 2017 auto generated files
Generated\ Files/

# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*

# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml

# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c

# Benchmark Results
BenchmarkDotNet.Artifacts/

# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/

# ASP.NET Scaffolding
ScaffoldingReadMe.txt

# StyleCop
StyleCopReport.xml

# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc

# Chutzpah Test files
_Chutzpah*

# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb

# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap

# Visual Studio Trace Files
*.e2e

# TFS 2012 Local Workspace
$tf/

# Guidance Automation Toolkit
*.gpState

# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user

# TeamCity is a build add-in
_TeamCity*

# DotCover is a Code Coverage Tool
*.dotCover

# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json

# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info

# Visual Studio code coverage results
*.coverage
*.coveragexml

# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*

# MightyMoose
*.mm.*
AutoTest.Net/

# Web workbench (sass)
.sass-cache/

# Installshield output folder
[Ee]xpress/

# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html

# Click-Once directory
publish/

# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj

# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/

# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets

# Microsoft Azure Build Output
csx/
*.build.csdef

# Microsoft Azure Emulator
ecf/
rcf/

# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload

# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/

# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs

# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk

# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/

# RIA/Silverlight projects
Generated_Code/

# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak

# SQL Server files
*.mdf
*.ldf
*.ndf

# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl

# Microsoft Fakes
FakesAssemblies/

# GhostDoc plugin setting file
*.GhostDoc.xml

# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/

# Visual Studio 6 build log
*.plg

# Visual Studio 6 workspace options file
*.opt

# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw

# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions

# Paket dependency manager
.paket/paket.exe
paket-files/

# FAKE - F# Make
.fake/

# CodeRush personal settings
.cr/personal

# Python Tools for Visual Studio (PTVS)
__pycache__/


# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config

# Tabs Studio
*.tss

# Telerik's JustMock configuration file
*.jmconfig

# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs

# OpenCover UI analysis results
OpenCover/

# Azure Stream Analytics local run output
ASALocalRun/

# MSBuild Binary and Structured Log
*.binlog

# NVidia Nsight GPU debugger configuration file
*.nvuser

# MFractors (Xamarin productivity tool) working folder
.mfractor/

# Local History for Visual Studio
.localhistory/

# BeatPulse healthcheck temp database
healthchecksdb

# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/

# Ionide (cross platform F# VS Code tools) working folder
.ionide/

# Fody - auto-generated XML schema
FodyWeavers.xsd

# build
build
monotonic_align/core.c
*.o
*.so
*.dll

# data
/config.json
/*.pth
*.wav
/monotonic_align/monotonic_align
/resources
/MoeGoe.spec
/dist/MoeGoe
/dist

/env
.idea
infer-web.py
infer.py
app-old.py
hubert_base.pt
rmvpe.pt
test.py
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Kevin Wang

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,13 +1,2 @@
-
-
-emoji: 🐨
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 4.36.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# talktalkai-singing
+TalkTalkAI-音乐区
app.py
ADDED
@@ -0,0 +1,566 @@
import os
import glob
import json
import traceback
import logging
import gradio as gr
import numpy as np
import librosa
import torch
import asyncio
import ffmpeg
import subprocess
import sys
import io
import wave
from datetime import datetime
from fairseq import checkpoint_utils
import urllib.request
import zipfile
import shutil
import gradio as gr
from textwrap import dedent
import pprint
import time

import re
import requests
import subprocess
from pathlib import Path
from scipy.io.wavfile import write
from scipy.io import wavfile
import soundfile as sf

from lib.infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from vc_infer_pipeline import VC
from config import Config
config = Config()
logging.getLogger("numba").setLevel(logging.WARNING)
spaces = True  # os.getenv("SYSTEM") == "spaces"
force_support = True

audio_mode = []
f0method_mode = []
f0method_info = ""

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
}
pattern = r'//www\.bilibili\.com/video[^"]*'

# Download models

urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/hubert_base", "hubert_base.pt")
urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/rmvpe", "rmvpe.pt")

# Get zip name

pattern_zip = r"/([^/]+)\.zip$"

def get_file_name(url):
    match = re.search(pattern_zip, url)
    if match:
        extracted_string = match.group(1)
        return extracted_string
    else:
        raise Exception("没有找到AI歌手模型的zip压缩包。")

# Get RVC models

def extract_zip(extraction_folder, zip_name):
    os.makedirs(extraction_folder)
    with zipfile.ZipFile(zip_name, 'r') as zip_ref:
        zip_ref.extractall(extraction_folder)
    os.remove(zip_name)

    index_filepath, model_filepath = None, None
    for root, dirs, files in os.walk(extraction_folder):
        for name in files:
            if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
                index_filepath = os.path.join(root, name)

            if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
                model_filepath = os.path.join(root, name)

    if not model_filepath:
        raise Exception(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')

    # move model and index file to extraction folder
    os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
    if index_filepath:
        os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))

    # remove any unnecessary nested folders
    for filepath in os.listdir(extraction_folder):
        if os.path.isdir(os.path.join(extraction_folder, filepath)):
            shutil.rmtree(os.path.join(extraction_folder, filepath))

# Get username in OpenXLab

def get_username(url):
    match_username = re.search(r'models/(.*?)/', url)
    if match_username:
        result = match_username.group(1)
        return result

def download_online_model(url, dir_name):
    if url.startswith('https://download.openxlab.org.cn/models/'):
        zip_path = get_username(url) + "-" + get_file_name(url)
    else:
        zip_path = get_file_name(url)
    if not os.path.exists(zip_path):
        try:
            zip_name = url.split('/')[-1]
            extraction_folder = os.path.join(zip_path, dir_name)
            if os.path.exists(extraction_folder):
                raise Exception(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')

            if 'pixeldrain.com' in url:
                url = f'https://pixeldrain.com/api/file/{zip_name}'

            urllib.request.urlretrieve(url, zip_name)

            extract_zip(extraction_folder, zip_name)
            # return f'[√] {dir_name} Model successfully downloaded!'

        except Exception as e:
            raise Exception(str(e))

# Get bilibili BV id

def get_bilibili_video_id(url):
    match = re.search(r'/video/([a-zA-Z0-9]+)/', url)
    extracted_value = match.group(1)
    return extracted_value

# Get bilibili audio
def find_first_appearance_with_neighborhood(text, pattern):
    match = re.search(pattern, text)

    if match:
        return match.group()
    else:
        return None

def search_bilibili(keyword):
    if keyword.startswith("BV"):
        req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1".format(keyword), headers=headers).text
    else:
        req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1&tids=3&page=1".format(keyword), headers=headers).text

    video_link = "https:" + find_first_appearance_with_neighborhood(req, pattern)

    return video_link

# Save bilibili audio

def get_response(html_url):
    headers = {
        "referer": "https://www.bilibili.com/",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
    }
    response = requests.get(html_url, headers=headers)
    return response

def get_video_info(html_url):
    response = get_response(html_url)
    html_data = re.findall('<script>window.__playinfo__=(.*?)</script>', response.text)[0]
    json_data = json.loads(html_data)
    if json_data['data']['dash']['audio'][0]['backupUrl'] != None:
        audio_url = json_data['data']['dash']['audio'][0]['backupUrl'][0]
    else:
        audio_url = json_data['data']['dash']['audio'][0]['baseUrl']
    return audio_url

def save_audio(title, audio_url):
    audio_content = get_response(audio_url).content
    with open(title + '.wav', mode='wb') as f:
        f.write(audio_content)
    print("音乐内容保存完成")


# Use UVR-HP5/2

urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP2.pth", "uvr5/uvr_model/UVR-HP2.pth")
urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP5.pth", "uvr5/uvr_model/UVR-HP5.pth")
# urllib.request.urlretrieve("https://huggingface.co/fastrolling/uvr/resolve/main/Main_Models/5_HP-Karaoke-UVR.pth", "uvr5/uvr_model/UVR-HP5.pth")

from uvr5.vr import AudioPre
weight_uvr5_root = "uvr5/uvr_model"
uvr5_names = []
for name in os.listdir(weight_uvr5_root):
    if name.endswith(".pth") or "onnx" in name:
        uvr5_names.append(name.replace(".pth", ""))

func = AudioPre
pre_fun_hp2 = func(
    agg=int(10),
    model_path=os.path.join(weight_uvr5_root, "UVR-HP2.pth"),
    device="cuda",
    is_half=True,
)

pre_fun_hp5 = func(
    agg=int(10),
    model_path=os.path.join(weight_uvr5_root, "UVR-HP5.pth"),
    device="cuda",
    is_half=True,
)

# Separate vocals

def youtube_downloader(
    video_identifier,
    filename,
    split_model,
):
    print(video_identifier)
    video_info = get_video_info(video_identifier)
    print(video_info)
    audio_content = get_response(video_info).content
    with open(filename.strip() + ".wav", mode="wb") as f:
        f.write(audio_content)
    audio_path = filename.strip() + ".wav"

    # make dir output
    os.makedirs("output", exist_ok=True)

    if split_model == "UVR-HP2":
        pre_fun = pre_fun_hp2
    else:
        pre_fun = pre_fun_hp5

    pre_fun._path_audio_(audio_path, f"./output/{split_model}/{filename}/", f"./output/{split_model}/{filename}/", "wav")
    os.remove(filename.strip() + ".wav")

    return f"./output/{split_model}/{filename}/vocal_{filename}.wav_10.wav", f"./output/{split_model}/{filename}/instrument_{filename}.wav_10.wav"

# Original code

if force_support is False or spaces is True:
    if spaces is True:
        audio_mode = ["Upload audio", "TTS Audio"]
    else:
        audio_mode = ["Input path", "Upload audio", "TTS Audio"]
    f0method_mode = ["pm", "harvest"]
    f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better). (Default: PM)"
else:
    audio_mode = ["Input path", "Upload audio", "Youtube", "TTS Audio"]
    f0method_mode = ["pm", "harvest", "crepe"]
    f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better), and Crepe effect is good but requires GPU (Default: PM)"

if os.path.isfile("rmvpe.pt"):
    f0method_mode.insert(2, "rmvpe")

def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index):
    def vc_fn(
        vc_audio_mode,
        vc_input,
        vc_upload,
        tts_text,
        tts_voice,
        f0_up_key,
        f0_method,
        index_rate,
        filter_radius,
        resample_sr,
        rms_mix_rate,
        protect,
    ):
        try:
            logs = []
            print(f"Converting using {model_name}...")
            logs.append(f"Converting using {model_name}...")
            yield "\n".join(logs), None
            if vc_audio_mode == "Input path" or "Youtube" and vc_input != "":
                audio, sr = librosa.load(vc_input, sr=16000, mono=True)
            elif vc_audio_mode == "Upload audio":
                if vc_upload is None:
                    return "You need to upload an audio", None
                sampling_rate, audio = vc_upload
                duration = audio.shape[0] / sampling_rate
                if duration > 20 and spaces:
                    return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
                audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
                if len(audio.shape) > 1:
                    audio = librosa.to_mono(audio.transpose(1, 0))
                if sampling_rate != 16000:
                    audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
            times = [0, 0, 0]
            f0_up_key = int(f0_up_key)
            audio_opt = vc.pipeline(
                hubert_model,
                net_g,
                0,
                audio,
                vc_input,
                times,
                f0_up_key,
                f0_method,
                file_index,
                # file_big_npy,
                index_rate,
                if_f0,
                filter_radius,
                tgt_sr,
                resample_sr,
                rms_mix_rate,
                version,
                protect,
                f0_file=None,
            )
            info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
            print(f"{model_name} | {info}")
            logs.append(f"Successfully Convert {model_name}\n{info}")
            yield "\n".join(logs), (tgt_sr, audio_opt)
        except Exception as err:
            info = traceback.format_exc()
            print(info)
            print(f"Error when using {model_name}.\n{str(err)}")
            yield info, None
    return vc_fn

def combine_vocal_and_inst(model_name, song_name, song_id, split_model, cover_song, vocal_volume, inst_volume):
    # samplerate, data = wavfile.read(cover_song)
    vocal_path = cover_song  # f"output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav"
    output_path = song_name.strip() + "-AI-" + ''.join(os.listdir(f"{model_name}")).strip() + "翻唱版.mp3"
    inst_path = f"output/{split_model}/{song_id}/instrument_{song_id}.wav_10.wav"
    # with wave.open(vocal_path, "w") as wave_file:
    #     wave_file.setnchannels(1)
    #     wave_file.setsampwidth(2)
    #     wave_file.setframerate(samplerate)
    #     wave_file.writeframes(data.tobytes())
    command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [0:a]volume={inst_volume}[i];[1:a]volume={vocal_volume}[v];[i][v]amix=inputs=2:duration=longest[a] -map [a] -b:a 320k -c:a libmp3lame {output_path}'
    result = subprocess.run(command.split(), stdout=subprocess.PIPE)
    print(result.stdout.decode())
    return output_path

def load_hubert():
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(config.device)
    if config.is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    hubert_model.eval()

def rvc_models(model_name):
    global vc, net_g, index_files, tgt_sr, version
    categories = []
    models = []
    for w_root, w_dirs, _ in os.walk(f"{model_name}"):
        model_count = 1
        for sub_dir in w_dirs:
            pth_files = glob.glob(f"{model_name}/{sub_dir}/*.pth")
            index_files = glob.glob(f"{model_name}/{sub_dir}/*.index")
            if pth_files == []:
                print(f"Model [{model_count}/{len(w_dirs)}]: No Model file detected, skipping...")
                continue
            cpt = torch.load(pth_files[0])
            tgt_sr = cpt["config"][-1]
            cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
            if_f0 = cpt.get("f0", 1)
            version = cpt.get("version", "v1")
            if version == "v1":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
                else:
                    net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
                model_version = "V1"
            elif version == "v2":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
                else:
                    net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
                model_version = "V2"
            del net_g.enc_q
            print(net_g.load_state_dict(cpt["weight"], strict=False))
            net_g.eval().to(config.device)
            if config.is_half:
                net_g = net_g.half()
            else:
                net_g = net_g.float()
            vc = VC(tgt_sr, config)
            if index_files == []:
                print("Warning: No Index file detected!")
                index_info = "None"
                model_index = ""
            else:
                index_info = index_files[0]
                model_index = index_files[0]
            print(f"Model loaded [{model_count}/{len(w_dirs)}]: {index_files[0]} / {index_info} | ({model_version})")
            model_count += 1
            models.append((index_files[0][:-4], index_files[0][:-4], "", "", model_version, create_vc_fn(index_files[0], tgt_sr, net_g, vc, if_f0, version, model_index)))
    categories.append(["Models", "", models])
    return vc, net_g, index_files, tgt_sr, version

load_hubert()

singers = "您的专属AI歌手阵容:"

def rvc_infer_music(url, model_name, song_name, split_model, f0_up_key, vocal_volume, inst_volume):
    url = url.strip().replace(" ", "")
    model_name = model_name.strip().replace(" ", "")
    if url.startswith('https://download.openxlab.org.cn/models/'):
        zip_path = get_username(url) + "-" + get_file_name(url)
    else:
        zip_path = get_file_name(url)
    global singers
    if model_name not in singers:
        singers = singers + ' ' + model_name
    download_online_model(url, model_name)
    rvc_models(zip_path)
    song_name = song_name.strip().replace(" ", "")
    video_identifier = search_bilibili(song_name)
    song_id = get_bilibili_video_id(video_identifier)

    if os.path.isdir(f"./output/{split_model}/{song_id}") == True:
        audio, sr = librosa.load(f"./output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav", sr=16000, mono=True)
        song_infer = vc.pipeline(
            hubert_model,
            net_g,
            0,
            audio,
            "",
            [0, 0, 0],
            f0_up_key,
            "rmvpe",
            index_files[0],
            0.7,
            1,
            3,
            tgt_sr,
            0,
            0.25,
            version,
            0.33,
            f0_file=None,
        )
    else:
        audio, sr = librosa.load(youtube_downloader(video_identifier, song_id, split_model)[0], sr=16000, mono=True)
        song_infer = vc.pipeline(
            hubert_model,
            net_g,
            0,
            audio,
            "",
            [0, 0, 0],
            f0_up_key,
            "rmvpe",
            index_files[0],
            0.7,
            1,
            3,
            tgt_sr,
            0,
            0.25,
            version,
            0.33,
            f0_file=None,
        )
    sf.write(song_name.strip() + zip_path + "AI翻唱.wav", song_infer, tgt_sr)
    output_full_song = combine_vocal_and_inst(zip_path, song_name.strip(), song_id, split_model, song_name.strip() + zip_path + "AI翻唱.wav", vocal_volume, inst_volume)
    os.remove(song_name.strip() + zip_path + "AI翻唱.wav")
    return output_full_song, singers

app = gr.Blocks(theme="JohnSmith9982/small_and_pretty")
with app:
    with gr.Tab("中文版"):
        gr.Markdown("# <center>🌊💕🎶 滔滔AI,您的专属AI全明星乐团</center>")
        gr.Markdown("## <center>🌟 只需一个歌曲名,全网AI歌手任您选择!随时随地,听我想听!</center>")
        gr.Markdown("### <center>🤗 更多精彩应用,敬请关注[滔滔AI](http://www.talktalkai.com);相关问题欢迎在我们的[B站](https://space.bilibili.com/501495851)账号交流!滔滔AI,为爱滔滔!💕</center>")
        with gr.Accordion("💡 一些AI歌手模型链接及使用说明(建议阅读)", open=False):
            _ = f""" 任何能够在线下载的zip压缩包的链接都可以哦(zip压缩包只需包括AI歌手模型的.pth和.index文件,zip压缩包的链接需要以.zip作为后缀):
            * Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
            * Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
            * AI派蒙: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
            * AI孙燕姿: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
            * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P)(推荐使用[OpenXLab](https://openxlab.org.cn/models)存放模型zip压缩包): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
            说明1:点击“一键开启AI翻唱之旅吧!”按钮即可使用!✨\n
            说明2:一般情况下,男声演唱的歌曲转换成AI女声演唱需要升调,反之则需要降调;在“歌曲人声升降调”模块可以调整\n
            说明3:对于同一个AI歌手模型或者同一首歌曲,第一次的运行时间会比较长(大约1分钟),请您耐心等待;之后的运行时间会大大缩短哦!\n
            说明4:您之前下载过的模型会在“已下载的AI歌手全明星阵容”模块出现\n
            说明5:此程序使用 [RVC](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) AI歌手模型,感谢[作者](https://space.bilibili.com/5760446)的开源!RVC模型训练教程参见[视频](https://www.bilibili.com/video/BV1mX4y1C7w4)\n
            🤗 我们正在创建一个完全开源、共建共享的AI歌手模型社区,让更多的人感受到AI音乐的乐趣与魅力!请关注我们的[B站](https://space.bilibili.com/501495851)账号,了解社区的最新进展!合作联系:talktalkai.kevin@gmail.com
            """
            gr.Markdown(dedent(_))

        with gr.Row():
            with gr.Column():
                inp1 = gr.Textbox(label="请输入AI歌手模型链接", info="模型需要是含有.pth和.index文件的zip压缩包", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
            with gr.Column():
                inp2 = gr.Textbox(label="请给您的AI歌手起一个昵称吧", info="可自定义名称,但名称中不能有特殊符号", lines=1, value="AI Taylor", placeholder="AI Taylor")
                inp3 = gr.Textbox(label="请输入您需要AI翻唱的歌曲名", info="如果您对搜索结果不满意,可在歌曲名后加上“无损”或“歌手的名字”等关键词;歌曲名中不能有特殊符号", lines=1, value="小幸运", placeholder="小幸运")
        with gr.Row():
            inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
            inp5 = gr.Slider(label="歌曲人声升降调", info="默认为0,+2为升高2个key,以此类推", minimum=-12, maximum=12, value=0, step=1)
            inp6 = gr.Slider(label="歌曲人声音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
            inp7 = gr.Slider(label="歌曲伴奏音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
        btn = gr.Button("一键开启AI翻唱之旅吧!💕", variant="primary")
        with gr.Row():
            output_song = gr.Audio(label="AI歌手为您倾情演绎")
            singer_list = gr.Textbox(label="已下载的AI歌手全明星阵容")

        btn.click(fn=rvc_infer_music, inputs=[inp1, inp2, inp3, inp4, inp5, inp6, inp7], outputs=[output_song, singer_list])

        gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。请自觉合规使用此程序,程序开发者不负有任何责任。</center>")
        gr.HTML('''
        <div class="footer">
            <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
            </p>
        </div>
        ''')
    with gr.Tab("EN"):
        gr.Markdown("# <center>🌊💕🎶 TalkTalkAI - Best AI song cover generator ever</center>")
        gr.Markdown("## <center>🌟 Provide the name of a song and our application running on A100 will handle everything else!</center>")
        gr.Markdown("### <center>🤗 [TalkTalkAI](http://www.talktalkai.com/), let everyone enjoy a better life through human-centered AI💕</center>")
        with gr.Accordion("💡 Some AI singers you can try", open=False):
            _ = f""" Any Zip file that you can download online will be fine (The Zip file should contain .pth and .index files):
            * AI Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
            * AI Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
            * AI Paimon: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
            * AI Stefanie Sun: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
            * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
            """
            gr.Markdown(dedent(_))

        with gr.Row():
            with gr.Column():
                inp1_en = gr.Textbox(label="The Zip file of an AI singer", info="The Zip file should contain .pth and .index files", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
            with gr.Column():
                inp2_en = gr.Textbox(label="The name of your AI singer", lines=1, value="AI Taylor", placeholder="AI Taylor")
                inp3_en = gr.Textbox(label="The name of a song", lines=1, value="Hotel California Eagles", placeholder="Hotel California Eagles")
        with gr.Row():
            inp4_en = gr.Dropdown(label="UVR models", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
            inp5_en = gr.Slider(label="Transpose", info="0 from man to man (or woman to woman); 12 from man to woman and -12 from woman to man.", minimum=-12, maximum=12, value=0, step=1)
            inp6_en = gr.Slider(label="Vocal volume", info="Adjust vocal volume (Default: 1)", minimum=0, maximum=3, value=1, step=0.2)
            inp7_en = gr.Slider(label="Instrument volume", info="Adjust instrument volume (Default: 1)", minimum=0, maximum=3, value=1, step=0.2)
        btn_en = gr.Button("Convert💕", variant="primary")
        with gr.Row():
            output_song_en = gr.Audio(label="AI song cover")
            singer_list_en = gr.Textbox(label="The AI singers you have")

        btn_en.click(fn=rvc_infer_music, inputs=[inp1_en, inp2_en, inp3_en, inp4_en, inp5_en, inp6_en, inp7_en], outputs=[output_song_en, singer_list_en])


        gr.HTML('''
        <div class="footer">
            <p>🤗 - Stay tuned! The best is yet to come.
            </p>
            <p>📧 - Contact us: talktalkai.kevin@gmail.com
            </p>
        </div>
        ''')

app.queue(max_size=40, api_open=False)
app.launch(max_threads=400, show_error=True)
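An editor's aside on app.py, not part of the committed diff: in vc_fn, the branch condition written as vc_audio_mode == "Input path" or "Youtube" and vc_input != "" parses as (vc_audio_mode == "Input path") or ("Youtube" and vc_input != ""), because "and" binds tighter than "or" and a non-empty string literal is always truthy. A minimal, runnable sketch of the pitfall, using hypothetical stand-in values:

    # Sketch only: the committed condition vs. the presumably intended membership test.
    vc_audio_mode = "Upload audio"
    vc_input = "some/path.wav"

    # As committed: parses as (mode == "Input path") or ("Youtube" and vc_input != "")
    as_committed = vc_audio_mode == "Input path" or "Youtube" and vc_input != ""

    # Presumably intended: only the two path-based modes should take this branch
    presumably_intended = vc_audio_mode in ("Input path", "Youtube") and vc_input != ""

    print(as_committed)         # True  -- branch taken even though the mode is "Upload audio"
    print(presumably_intended)  # False

In this Space the UI wires the buttons to rvc_infer_music rather than to vc_fn, so the mis-parse is latent rather than user-visible.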
config.py
ADDED
@@ -0,0 +1,80 @@
import argparse
import sys
import torch
from multiprocessing import cpu_count

class Config:
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
    # check `getattr` and try it for compatibility
    @staticmethod
    def has_mps() -> bool:
        if not torch.backends.mps.is_available():
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("INFO: Found GPU", self.gpu_name, ", force to fp32")
                self.is_half = False
            else:
                print("INFO: Found GPU", self.gpu_name)
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
        elif self.has_mps():
            print("INFO: No supported Nvidia GPU found, use MPS instead")
            self.device = "mps"
            self.is_half = False
        else:
            print("INFO: No supported Nvidia GPU found, use CPU instead")
            self.device = "cpu"
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # 6 GB VRAM configuration
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # 5 GB VRAM configuration
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem != None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
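For orientation, a minimal usage sketch of the Config class above (an editor's addition, assuming config.py is imported exactly as app.py does; the printed values depend on the host GPU):

    # Sketch only: Config() runs device_config() in __init__ and settles on a
    # device, a precision flag, and the chunking windows used by vc_infer_pipeline.
    from config import Config

    cfg = Config()
    print(cfg.device, cfg.is_half)  # e.g. "cuda:0 True" on a half-precision-capable GPU
    print(cfg.x_pad, cfg.x_query, cfg.x_center, cfg.x_max)  # e.g. 3 10 60 65 when is_half is True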
lib/infer_pack/attentions.py
ADDED
@@ -0,0 +1,417 @@
1 |
+
import copy
|
2 |
+
import math
|
3 |
+
import numpy as np
|
4 |
+
import torch
|
5 |
+
from torch import nn
|
6 |
+
from torch.nn import functional as F
|
7 |
+
|
8 |
+
from lib.infer_pack import commons
|
9 |
+
from lib.infer_pack import modules
|
10 |
+
from lib.infer_pack.modules import LayerNorm
|
11 |
+
|
12 |
+
|
13 |
+
class Encoder(nn.Module):
|
14 |
+
def __init__(
|
15 |
+
self,
|
16 |
+
hidden_channels,
|
17 |
+
filter_channels,
|
18 |
+
n_heads,
|
19 |
+
n_layers,
|
20 |
+
kernel_size=1,
|
21 |
+
p_dropout=0.0,
|
22 |
+
window_size=10,
|
23 |
+
**kwargs
|
24 |
+
):
|
25 |
+
super().__init__()
|
26 |
+
self.hidden_channels = hidden_channels
|
27 |
+
self.filter_channels = filter_channels
|
28 |
+
self.n_heads = n_heads
|
29 |
+
self.n_layers = n_layers
|
30 |
+
self.kernel_size = kernel_size
|
31 |
+
self.p_dropout = p_dropout
|
32 |
+
self.window_size = window_size
|
33 |
+
|
34 |
+
self.drop = nn.Dropout(p_dropout)
|
35 |
+
self.attn_layers = nn.ModuleList()
|
36 |
+
self.norm_layers_1 = nn.ModuleList()
|
37 |
+
self.ffn_layers = nn.ModuleList()
|
38 |
+
self.norm_layers_2 = nn.ModuleList()
|
39 |
+
for i in range(self.n_layers):
|
40 |
+
self.attn_layers.append(
|
41 |
+
MultiHeadAttention(
|
42 |
+
hidden_channels,
|
43 |
+
hidden_channels,
|
44 |
+
n_heads,
|
45 |
+
p_dropout=p_dropout,
|
46 |
+
window_size=window_size,
|
47 |
+
)
|
48 |
+
)
|
49 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
50 |
+
self.ffn_layers.append(
|
51 |
+
FFN(
|
52 |
+
hidden_channels,
|
53 |
+
hidden_channels,
|
54 |
+
filter_channels,
|
55 |
+
kernel_size,
|
56 |
+
p_dropout=p_dropout,
|
57 |
+
)
|
58 |
+
)
|
59 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
60 |
+
|
61 |
+
def forward(self, x, x_mask):
|
62 |
+
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
63 |
+
x = x * x_mask
|
64 |
+
for i in range(self.n_layers):
|
65 |
+
y = self.attn_layers[i](x, x, attn_mask)
|
66 |
+
y = self.drop(y)
|
67 |
+
x = self.norm_layers_1[i](x + y)
|
68 |
+
|
69 |
+
y = self.ffn_layers[i](x, x_mask)
|
70 |
+
y = self.drop(y)
|
71 |
+
x = self.norm_layers_2[i](x + y)
|
72 |
+
x = x * x_mask
|
73 |
+
return x
|
74 |
+
|
75 |
+
|
76 |
+
class Decoder(nn.Module):
|
77 |
+
def __init__(
|
78 |
+
self,
|
79 |
+
hidden_channels,
|
80 |
+
filter_channels,
|
81 |
+
n_heads,
|
82 |
+
n_layers,
|
83 |
+
kernel_size=1,
|
84 |
+
p_dropout=0.0,
|
85 |
+
proximal_bias=False,
|
86 |
+
proximal_init=True,
|
87 |
+
**kwargs
|
88 |
+
):
|
89 |
+
super().__init__()
|
90 |
+
self.hidden_channels = hidden_channels
|
91 |
+
self.filter_channels = filter_channels
|
92 |
+
self.n_heads = n_heads
|
93 |
+
self.n_layers = n_layers
|
94 |
+
self.kernel_size = kernel_size
|
95 |
+
self.p_dropout = p_dropout
|
96 |
+
self.proximal_bias = proximal_bias
|
97 |
+
self.proximal_init = proximal_init
|
98 |
+
|
99 |
+
self.drop = nn.Dropout(p_dropout)
|
100 |
+
self.self_attn_layers = nn.ModuleList()
|
101 |
+
self.norm_layers_0 = nn.ModuleList()
|
102 |
+
self.encdec_attn_layers = nn.ModuleList()
|
103 |
+
self.norm_layers_1 = nn.ModuleList()
|
104 |
+
self.ffn_layers = nn.ModuleList()
|
105 |
+
self.norm_layers_2 = nn.ModuleList()
|
106 |
+
for i in range(self.n_layers):
|
107 |
+
self.self_attn_layers.append(
|
108 |
+
MultiHeadAttention(
|
109 |
+
hidden_channels,
|
110 |
+
hidden_channels,
|
111 |
+
n_heads,
|
112 |
+
p_dropout=p_dropout,
|
113 |
+
proximal_bias=proximal_bias,
|
114 |
+
proximal_init=proximal_init,
|
115 |
+
)
|
116 |
+
)
|
117 |
+
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
118 |
+
self.encdec_attn_layers.append(
|
119 |
+
MultiHeadAttention(
|
120 |
+
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
|
121 |
+
)
|
122 |
+
)
|
123 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
124 |
+
self.ffn_layers.append(
|
125 |
+
FFN(
|
126 |
+
hidden_channels,
|
127 |
+
hidden_channels,
|
128 |
+
filter_channels,
|
129 |
+
kernel_size,
|
130 |
+
p_dropout=p_dropout,
|
131 |
+
causal=True,
|
132 |
+
)
|
133 |
+
)
|
134 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
135 |
+
|
136 |
+
def forward(self, x, x_mask, h, h_mask):
|
137 |
+
"""
|
138 |
+
x: decoder input
|
139 |
+
h: encoder output
|
140 |
+
"""
|
141 |
+
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
|
142 |
+
device=x.device, dtype=x.dtype
|
143 |
+
)
|
144 |
+
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
145 |
+
x = x * x_mask
|
146 |
+
for i in range(self.n_layers):
|
147 |
+
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
148 |
+
y = self.drop(y)
|
149 |
+
x = self.norm_layers_0[i](x + y)
|
150 |
+
|
151 |
+
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
152 |
+
y = self.drop(y)
|
153 |
+
x = self.norm_layers_1[i](x + y)
|
154 |
+
|
155 |
+
y = self.ffn_layers[i](x, x_mask)
|
156 |
+
y = self.drop(y)
|
157 |
+
x = self.norm_layers_2[i](x + y)
|
158 |
+
x = x * x_mask
|
159 |
+
return x
|
160 |
+
|
161 |
+
|
162 |
+
class MultiHeadAttention(nn.Module):
|
163 |
+
def __init__(
|
164 |
+
self,
|
165 |
+
channels,
|
166 |
+
out_channels,
|
167 |
+
n_heads,
|
168 |
+
p_dropout=0.0,
|
169 |
+
window_size=None,
|
170 |
+
heads_share=True,
|
171 |
+
block_length=None,
|
172 |
+
proximal_bias=False,
|
173 |
+
proximal_init=False,
|
174 |
+
):
|
175 |
+
super().__init__()
|
176 |
+
assert channels % n_heads == 0
|
177 |
+
|
178 |
+
self.channels = channels
|
179 |
+
self.out_channels = out_channels
|
180 |
+
self.n_heads = n_heads
|
181 |
+
self.p_dropout = p_dropout
|
182 |
+
self.window_size = window_size
|
183 |
+
self.heads_share = heads_share
|
184 |
+
self.block_length = block_length
|
185 |
+
self.proximal_bias = proximal_bias
|
186 |
+
self.proximal_init = proximal_init
|
187 |
+
self.attn = None
|
188 |
+
|
189 |
+
self.k_channels = channels // n_heads
|
190 |
+
self.conv_q = nn.Conv1d(channels, channels, 1)
|
191 |
+
self.conv_k = nn.Conv1d(channels, channels, 1)
|
192 |
+
self.conv_v = nn.Conv1d(channels, channels, 1)
|
193 |
+
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
194 |
+
self.drop = nn.Dropout(p_dropout)
|
195 |
+
|
196 |
+
if window_size is not None:
|
197 |
+
n_heads_rel = 1 if heads_share else n_heads
|
198 |
+
rel_stddev = self.k_channels**-0.5
|
199 |
+
        self.emb_rel_k = nn.Parameter(
            torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
            * rel_stddev
        )
        self.emb_rel_v = nn.Parameter(
            torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
            * rel_stddev
        )

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert (
                t_s == t_t
            ), "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(
                query / math.sqrt(self.k_channels), key_relative_embeddings
            )
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(
                device=scores.device, dtype=scores.dtype
            )
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert (
                    t_s == t_t
                ), "Local attention is only available for self-attention."
                block_mask = (
                    torch.ones_like(scores)
                    .triu(-self.block_length)
                    .tril(self.block_length)
                )
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(
                self.emb_rel_v, t_s
            )
            output = output + self._matmul_with_relative_values(
                relative_weights, value_relative_embeddings
            )
        output = (
            output.transpose(2, 3).contiguous().view(b, d, t_t)
        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first, then slice, to avoid conditional ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
            )
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[
            :, slice_start_position:slice_end_position
        ]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat a column of padding to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so the flat view reshapes to (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(
            x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
        )

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
            :, :, :length, length - 1 :
        ]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # Pad along the column (last) dimension.
        x = F.pad(
            x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
        )
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # Add zeros at the beginning that skew the elements after the reshape.
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation=None,
        causal=False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)  # fast GELU approximation
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
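The pad-and-reshape trick in `_relative_position_to_absolute_position` is worth a sanity check: for each query position i and key position j it reads relative slot k = j - i + (l - 1) using only pads and views, no gather ops. A minimal standalone sketch (plain torch; illustrative only, not part of the upload):

import torch
import torch.nn.functional as F

# Illustrative check of the relative -> absolute shift (not part of the upload).
b, h, l = 1, 1, 4
x = torch.arange(b * h * l * (2 * l - 1), dtype=torch.float).view(b, h, l, 2 * l - 1)

padded = F.pad(x, [0, 1])                                   # [b, h, l, 2l]
flat = F.pad(padded.view(b, h, l * 2 * l), [0, l - 1])      # 2l^2 + l - 1 elements
out = flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1 :]  # [b, h, l, l]

for i in range(l):
    for j in range(l):
        assert out[0, 0, i, j] == x[0, 0, i, j - i + l - 1]
print("absolute[i, j] == relative[i, j - i + l - 1]")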
lib/infer_pack/commons.py
ADDED
@@ -0,0 +1,166 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += (
        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    )
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def slice_segments2(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str
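For orientation: `rand_slice_segments` draws one random training window per batch item and also returns the start indices, so aligned frame-level features (f0, for instance, via `slice_segments2`) can be cut at the same positions. A minimal sketch, assuming the repo root is on sys.path:

import torch
from lib.infer_pack import commons  # assumes the repo root is on sys.path

feats = torch.randn(2, 192, 400)    # [batch, channels, frames]
lengths = torch.tensor([400, 350])  # valid frames per item
z_slice, ids = commons.rand_slice_segments(feats, lengths, segment_size=32)
print(z_slice.shape)                # torch.Size([2, 192, 32])

pitchf = torch.randn(2, 400)        # frame-level f0, [batch, frames]
print(commons.slice_segments2(pitchf, ids, 32).shape)  # torch.Size([2, 32])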
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
        num_timescales - 1
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
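Note that `convert_pad_shape` is defined twice in this file with identical bodies; the second definition shadows the first harmlessly. What it does: turn a per-dimension [[left, right], ...] spec (outermost dimension first) into the flat, last-dimension-first list that `F.pad` expects. A minimal sketch, assuming the repo root is on sys.path:

import torch
import torch.nn.functional as F
from lib.infer_pack.commons import convert_pad_shape, sequence_mask  # repo root on sys.path

print(convert_pad_shape([[0, 0], [0, 0], [1, 0]]))  # [1, 0, 0, 0, 0, 0]
x = torch.ones(2, 3, 4)
print(F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]])).shape)  # torch.Size([2, 3, 5])

print(sequence_mask(torch.tensor([2, 4]), 5))
# tensor([[ True,  True, False, False, False],
#         [ True,  True,  True,  True, False]])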
lib/infer_pack/models.py
ADDED
@@ -0,0 +1,1142 @@
import math

import numpy as np
import torch
from torch import nn
from torch.nn import Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

from lib.infer_pack import attentions, commons, modules
from lib.infer_pack.commons import get_padding, init_weights


class TextEncoder256(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(256, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # 256 coarse pitch bins
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch is None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class TextEncoder768(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(768, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # 256 coarse pitch bins
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch is None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x

    def remove_weight_norm(self):
        for i in range(self.n_flows):
            self.flows[i * 2].remove_weight_norm()


class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()
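The last two lines of `PosteriorEncoder.forward` are the usual reparameterization trick: the network predicts a mean `m` and log standard deviation `logs`, and a sample is drawn as mean plus scaled noise so gradients flow through both. A tiny standalone sketch (plain torch, illustrative shapes):

import torch

# Reparameterized sampling as in PosteriorEncoder.forward (illustrative shapes).
m = torch.zeros(1, 4, 10)        # predicted means, [b, channels, frames]
logs = torch.full_like(m, -1.0)  # predicted log standard deviations
x_mask = torch.ones(1, 1, 10)    # 1 for valid frames, 0 for padding

z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
print(z.shape, float(z.std()))   # torch.Size([1, 4, 10]); std is about exp(-1) = 0.37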
class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class SineGen(torch.nn.Module):
    """Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0)
    """

    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate the voiced/unvoiced (uv) signal
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv

    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
        f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        with torch.no_grad():
            f0 = f0[:, None].transpose(1, 2)
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                    idx + 2
                )  # idx + 2: the (idx+1)-th overtone, i.e. the (idx+2)-th harmonic
            rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the harmonic multiples cannot be optimized away afterwards
            rand_ini = torch.rand(
                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
            )
            rand_ini[:, 0] = 0
            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
            tmp_over_one = torch.cumsum(rad_values, 1)  # a % 1 here would make the cumsum below impossible to optimize
            tmp_over_one *= upp
            tmp_over_one = F.interpolate(
                tmp_over_one.transpose(2, 1),
                scale_factor=upp,
                mode="linear",
                align_corners=True,
            ).transpose(2, 1)
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            tmp_over_one %= 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sine_waves = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
            sine_waves = sine_waves * self.sine_amp
            uv = self._f02uv(f0)
            uv = F.interpolate(
                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise
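SineGen thus takes a frame-level F0 track plus an upsampling factor `upp` (samples per frame) and returns a sample-level sine source with noise mixed in. A minimal sketch, assuming the repo root is on sys.path so `lib.infer_pack.models` imports:

import torch
from lib.infer_pack.models import SineGen  # assumes repo root on sys.path

sine_gen = SineGen(samp_rate=16000, harmonic_num=0)
f0 = torch.full((1, 50), 220.0)           # 50 frames of frame-level F0 in Hz
sine, uv, noise = sine_gen(f0, upp=160)   # 160 samples per frame -> 8000 samples
print(sine.shape, uv.shape)               # torch.Size([1, 8000, 1]) for both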
class SourceModuleHnNSF(torch.nn.Module):
    """SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that the amplitude of noise in unvoiced regions is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(
        self,
        sampling_rate,
        harmonic_num=0,
        sine_amp=0.1,
        add_noise_std=0.003,
        voiced_threshod=0,
        is_half=True,
    ):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std
        self.is_half = is_half
        # to produce sine waveforms
        self.l_sin_gen = SineGen(
            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
        )

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x, upp=None):
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        if self.is_half:
            sine_wavs = sine_wavs.half()
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
        return sine_merge, None, None  # noise, uv


class GeneratorNSF(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels,
        sr,
        is_half=False,
    ):
        super(GeneratorNSF, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)

        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sr, harmonic_num=0, is_half=is_half
        )
        self.noise_convs = nn.ModuleList()
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            c_cur = upsample_initial_channel // (2 ** (i + 1))
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )
            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1 :])
                self.noise_convs.append(
                    Conv1d(
                        1,
                        c_cur,
                        kernel_size=stride_f0 * 2,
                        stride=stride_f0,
                        padding=stride_f0 // 2,
                    )
                )
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

        self.upp = np.prod(upsample_rates)

    def forward(self, x, f0, g=None):
        har_source, noi_source, uv = self.m_source(f0, self.upp)
        har_source = har_source.transpose(1, 2)
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            x_source = self.noise_convs[i](har_source)
            x = x + x_source
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


sr2sr = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}


class SynthesizerTrnMs256NSFsid(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        if isinstance(sr, str):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
    ):  # ds is the speaker id, shape [bs, 1]
        # print(1, pitch.shape)  # [bs, t]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        # print(-1, pitchf.shape, ids_slice, self.segment_size, self.hop_length, self.segment_size // self.hop_length)
        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        # print(-2, pitchf.shape, z_slice.shape)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate:
            head = int(z_p.shape[2] * rate)
            z_p = z_p[:, :, -head:]
            x_mask = x_mask[:, :, -head:]
            nsff0 = nsff0[:, -head:]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
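The `rate` argument of `infer` keeps only the trailing fraction of latent frames before decoding (despite the name, `head` holds the tail length), and the mask and f0 track are trimmed identically so everything stays aligned. A standalone sketch of just that arithmetic (dummy tensors, illustrative sizes):

import torch

T = 100
z_p = torch.randn(1, 192, T)      # latent frames (illustrative channel count)
x_mask = torch.ones(1, 1, T)
nsff0 = torch.randn(1, T)         # frame-level f0 for the NSF decoder

rate = 0.25
head = int(z_p.shape[2] * rate)   # despite the name, this is the tail length
z_p = z_p[:, :, -head:]
x_mask = x_mask[:, :, -head:]
nsff0 = nsff0[:, -head:]
print(z_p.shape, nsff0.shape)     # torch.Size([1, 192, 25]) torch.Size([1, 25])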
class SynthesizerTrnMs768NSFsid(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        if isinstance(sr, str):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder768(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
    ):  # ds is the speaker id, shape [bs, 1]
        # print(1, pitch.shape)  # [bs, t]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        # print(-1, pitchf.shape, ids_slice, self.segment_size, self.hop_length, self.segment_size // self.hop_length)
        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        # print(-2, pitchf.shape, z_slice.shape)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate:
            head = int(z_p.shape[2] * rate)
            z_p = z_p[:, :, -head:]
            x_mask = x_mask[:, :, -head:]
            nsff0 = nsff0[:, -head:]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class SynthesizerTrnMs256NSFsid_nono(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr=None,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, rate=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate:
            head = int(z_p.shape[2] * rate)
            z_p = z_p[:, :, -head:]
            x_mask = x_mask[:, :, -head:]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class SynthesizerTrnMs768NSFsid_nono(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr=None,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder768(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, rate=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate:
            head = int(z_p.shape[2] * rate)
            z_p = z_p[:, :, -head:]
            x_mask = x_mask[:, :, -head:]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
974 |
+
|
975 |
+
|
976 |
+
class MultiPeriodDiscriminator(torch.nn.Module):
|
977 |
+
def __init__(self, use_spectral_norm=False):
|
978 |
+
super(MultiPeriodDiscriminator, self).__init__()
|
979 |
+
periods = [2, 3, 5, 7, 11, 17]
|
980 |
+
# periods = [3, 5, 7, 11, 17, 23, 37]
|
981 |
+
|
982 |
+
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
983 |
+
discs = discs + [
|
984 |
+
DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
|
985 |
+
]
|
986 |
+
self.discriminators = nn.ModuleList(discs)
|
987 |
+
|
988 |
+
def forward(self, y, y_hat):
|
989 |
+
y_d_rs = [] #
|
990 |
+
y_d_gs = []
|
991 |
+
fmap_rs = []
|
992 |
+
fmap_gs = []
|
993 |
+
for i, d in enumerate(self.discriminators):
|
994 |
+
y_d_r, fmap_r = d(y)
|
995 |
+
y_d_g, fmap_g = d(y_hat)
|
996 |
+
# for j in range(len(fmap_r)):
|
997 |
+
# print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
|
998 |
+
y_d_rs.append(y_d_r)
|
999 |
+
y_d_gs.append(y_d_g)
|
1000 |
+
fmap_rs.append(fmap_r)
|
1001 |
+
fmap_gs.append(fmap_g)
|
1002 |
+
|
1003 |
+
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
1004 |
+
|
1005 |
+
|
1006 |
+
class MultiPeriodDiscriminatorV2(torch.nn.Module):
|
1007 |
+
def __init__(self, use_spectral_norm=False):
|
1008 |
+
super(MultiPeriodDiscriminatorV2, self).__init__()
|
1009 |
+
# periods = [2, 3, 5, 7, 11, 17]
|
1010 |
+
periods = [2, 3, 5, 7, 11, 17, 23, 37]
|
1011 |
+
|
1012 |
+
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
1013 |
+
discs = discs + [
|
1014 |
+
DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
|
1015 |
+
]
|
1016 |
+
self.discriminators = nn.ModuleList(discs)
|
1017 |
+
|
1018 |
+
def forward(self, y, y_hat):
|
1019 |
+
y_d_rs = [] #
|
1020 |
+
y_d_gs = []
|
1021 |
+
fmap_rs = []
|
1022 |
+
fmap_gs = []
|
1023 |
+
for i, d in enumerate(self.discriminators):
|
1024 |
+
y_d_r, fmap_r = d(y)
|
1025 |
+
y_d_g, fmap_g = d(y_hat)
|
1026 |
+
            # for j in range(len(fmap_r)):
            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
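DiscriminatorP above folds the 1-D waveform into a 2-D grid before convolving: the tail is reflect-padded so the length divides evenly by the period, then the signal is viewed as (batch, channels, t // period, period). A minimal standalone sketch of just that fold (sizes are illustrative, not taken from this commit):

import torch
import torch.nn.functional as F

period = 3
x = torch.randn(1, 1, 400)  # (batch, channels, samples)

b, c, t = x.shape
if t % period != 0:  # pad first, as in DiscriminatorP.forward
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")  # reflect-pad the tail
    t = t + n_pad
x = x.view(b, c, t // period, period)  # fold: one period per row
print(x.shape)  # torch.Size([1, 1, 134, 3])

Each column of the grid holds samples at the same phase offset within the period, which is what the (kernel_size, 1) convolutions then compare across cycles.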
lib/infer_pack/models_dml.py
ADDED
@@ -0,0 +1,1124 @@
import math, pdb, os
from time import time as ttime
import torch
from torch import nn
from torch.nn import functional as F
from lib.infer_pack import modules
from lib.infer_pack import attentions
from lib.infer_pack import commons
from lib.infer_pack.commons import init_weights, get_padding
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from lib.infer_pack.commons import init_weights
import numpy as np
from lib.infer_pack import commons


class TextEncoder256(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(256, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0 == True:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch == None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class TextEncoder768(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(768, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0 == True:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch == None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x

    def remove_weight_norm(self):
        for i in range(self.n_flows):
            self.flows[i * 2].remove_weight_norm()


class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()


class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class SineGen(torch.nn.Module):
    """Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0)
    """

    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv.float()

    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
        f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        with torch.no_grad():
            f0 = f0[:, None].transpose(1, 2)
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                    idx + 2
                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
            rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the n_har products cannot be optimized away in post-processing
            rand_ini = torch.rand(
                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
            )
            rand_ini[:, 0] = 0
            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # a % 1 here would leave the following cumsum impossible to optimize further
            tmp_over_one *= upp
            tmp_over_one = F.interpolate(
                tmp_over_one.transpose(2, 1),
                scale_factor=upp,
                mode="linear",
                align_corners=True,
            ).transpose(2, 1)
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(
                2, 1
            )  #######
            tmp_over_one %= 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sine_waves = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
            sine_waves = sine_waves * self.sine_amp
            uv = self._f02uv(f0)
            uv = F.interpolate(
                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise


class SourceModuleHnNSF(torch.nn.Module):
    """SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonic above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(
        self,
        sampling_rate,
        harmonic_num=0,
        sine_amp=0.1,
        add_noise_std=0.003,
        voiced_threshod=0,
        is_half=True,
    ):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std
        self.is_half = is_half
        # to produce sine waveforms
        self.l_sin_gen = SineGen(
            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
        )

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x, upp=None):
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        if self.is_half:
            sine_wavs = sine_wavs.half()
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
        return sine_merge, None, None  # noise, uv


class GeneratorNSF(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels,
        sr,
        is_half=False,
    ):
        super(GeneratorNSF, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)

        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sr, harmonic_num=0, is_half=is_half
        )
        self.noise_convs = nn.ModuleList()
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            c_cur = upsample_initial_channel // (2 ** (i + 1))
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )
            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1 :])
                self.noise_convs.append(
                    Conv1d(
                        1,
                        c_cur,
                        kernel_size=stride_f0 * 2,
                        stride=stride_f0,
                        padding=stride_f0 // 2,
                    )
                )
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

        self.upp = np.prod(upsample_rates)

    def forward(self, x, f0, g=None):
        har_source, noi_source, uv = self.m_source(f0, self.upp)
        har_source = har_source.transpose(1, 2)
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            x_source = self.noise_convs[i](har_source)
            x = x + x_source
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


sr2sr = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}


class SynthesizerTrnMs256NSFsid(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        if type(sr) == type("strr"):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
    ):  # ds here is the speaker id, [bs, 1]
        # print(1,pitch.shape)#[bs,t]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]  # the 1 is t, broadcast
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        # print(-2,pitchf.shape,z_slice.shape)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class SynthesizerTrnMs768NSFsid(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        if type(sr) == type("strr"):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder768(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
    ):  # ds here is the speaker id, [bs, 1]
        # print(1,pitch.shape)#[bs,t]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]  # the 1 is t, broadcast
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        # print(-2,pitchf.shape,z_slice.shape)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class SynthesizerTrnMs256NSFsid_nono(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr=None,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds here is the speaker id, [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]  # the 1 is t, broadcast
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class SynthesizerTrnMs768NSFsid_nono(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr=None,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder768(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds here is the speaker id, [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]  # the 1 is t, broadcast
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11, 17]
        # periods = [3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []  #
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            # for j in range(len(fmap_r)):
            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class MultiPeriodDiscriminatorV2(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminatorV2, self).__init__()
        # periods = [2, 3, 5, 7, 11, 17]
        periods = [2, 3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []  #
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            # for j in range(len(fmap_r)):
            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
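One visible difference between this DML variant and the ONNX export variant below: here SineGen._f02uv casts the voiced/unvoiced mask to float explicitly (return uv.float()), while the ONNX file returns it uncast. A self-contained sketch of that mask computation, with made-up F0 values (not from this commit):

import torch

voiced_threshold = 0  # matches the SineGen default above
f0 = torch.tensor([[0.0, 110.0, 220.0, 0.0]])  # hypothetical F0 track; 0 marks unvoiced frames

# ones where f0 exceeds the threshold, zeros elsewhere -- the uv mask
uv = torch.ones_like(f0) * (f0 > voiced_threshold)
print(uv.float())  # tensor([[0., 1., 1., 0.]])

In SineGen.forward this mask both gates the sine excitation and sets the noise floor: voiced frames get noise_std, unvoiced frames get sine_amp / 3.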
lib/infer_pack/models_onnx.py
ADDED
@@ -0,0 +1,819 @@
1 |
+
import math, pdb, os
|
2 |
+
from time import time as ttime
|
3 |
+
import torch
|
4 |
+
from torch import nn
|
5 |
+
from torch.nn import functional as F
|
6 |
+
from lib.infer_pack import modules
|
7 |
+
from lib.infer_pack import attentions
|
8 |
+
from lib.infer_pack import commons
|
9 |
+
from lib.infer_pack.commons import init_weights, get_padding
|
10 |
+
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
11 |
+
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
12 |
+
from lib.infer_pack.commons import init_weights
|
13 |
+
import numpy as np
|
14 |
+
from lib.infer_pack import commons
|
15 |
+
|
16 |
+
|
17 |
+
class TextEncoder256(nn.Module):
|
18 |
+
def __init__(
|
19 |
+
self,
|
20 |
+
out_channels,
|
21 |
+
hidden_channels,
|
22 |
+
filter_channels,
|
23 |
+
n_heads,
|
24 |
+
n_layers,
|
25 |
+
kernel_size,
|
26 |
+
p_dropout,
|
27 |
+
f0=True,
|
28 |
+
):
|
29 |
+
super().__init__()
|
30 |
+
self.out_channels = out_channels
|
31 |
+
self.hidden_channels = hidden_channels
|
32 |
+
self.filter_channels = filter_channels
|
33 |
+
self.n_heads = n_heads
|
34 |
+
self.n_layers = n_layers
|
35 |
+
self.kernel_size = kernel_size
|
36 |
+
self.p_dropout = p_dropout
|
37 |
+
self.emb_phone = nn.Linear(256, hidden_channels)
|
38 |
+
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
|
39 |
+
if f0 == True:
|
40 |
+
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
|
41 |
+
self.encoder = attentions.Encoder(
|
42 |
+
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
43 |
+
)
|
44 |
+
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
45 |
+
|
46 |
+
def forward(self, phone, pitch, lengths):
|
47 |
+
if pitch == None:
|
48 |
+
x = self.emb_phone(phone)
|
49 |
+
else:
|
50 |
+
x = self.emb_phone(phone) + self.emb_pitch(pitch)
|
51 |
+
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
|
52 |
+
x = self.lrelu(x)
|
53 |
+
x = torch.transpose(x, 1, -1) # [b, h, t]
|
54 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
|
55 |
+
x.dtype
|
56 |
+
)
|
57 |
+
x = self.encoder(x * x_mask, x_mask)
|
58 |
+
stats = self.proj(x) * x_mask
|
59 |
+
|
60 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
61 |
+
return m, logs, x_mask
|
62 |
+
|
63 |
+
|
64 |
+
class TextEncoder768(nn.Module):
|
65 |
+
def __init__(
|
66 |
+
self,
|
67 |
+
out_channels,
|
68 |
+
hidden_channels,
|
69 |
+
filter_channels,
|
70 |
+
n_heads,
|
71 |
+
n_layers,
|
72 |
+
kernel_size,
|
73 |
+
p_dropout,
|
74 |
+
f0=True,
|
75 |
+
):
|
76 |
+
super().__init__()
|
77 |
+
self.out_channels = out_channels
|
78 |
+
self.hidden_channels = hidden_channels
|
79 |
+
self.filter_channels = filter_channels
|
80 |
+
self.n_heads = n_heads
|
81 |
+
self.n_layers = n_layers
|
82 |
+
self.kernel_size = kernel_size
|
83 |
+
self.p_dropout = p_dropout
|
84 |
+
self.emb_phone = nn.Linear(768, hidden_channels)
|
85 |
+
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
|
86 |
+
if f0 == True:
|
87 |
+
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
|
88 |
+
self.encoder = attentions.Encoder(
|
89 |
+
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
90 |
+
)
|
91 |
+
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
92 |
+
|
93 |
+
def forward(self, phone, pitch, lengths):
|
94 |
+
if pitch == None:
|
95 |
+
x = self.emb_phone(phone)
|
96 |
+
else:
|
97 |
+
x = self.emb_phone(phone) + self.emb_pitch(pitch)
|
98 |
+
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
|
99 |
+
x = self.lrelu(x)
|
100 |
+
x = torch.transpose(x, 1, -1) # [b, h, t]
|
101 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
|
102 |
+
x.dtype
|
103 |
+
)
|
104 |
+
x = self.encoder(x * x_mask, x_mask)
|
105 |
+
stats = self.proj(x) * x_mask
|
106 |
+
|
107 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
108 |
+
return m, logs, x_mask
|
109 |
+
|
110 |
+
|
111 |
+
class ResidualCouplingBlock(nn.Module):
|
112 |
+
def __init__(
|
113 |
+
self,
|
114 |
+
channels,
|
115 |
+
hidden_channels,
|
116 |
+
kernel_size,
|
117 |
+
dilation_rate,
|
118 |
+
n_layers,
|
119 |
+
n_flows=4,
|
120 |
+
gin_channels=0,
|
121 |
+
):
|
122 |
+
super().__init__()
|
123 |
+
self.channels = channels
|
124 |
+
self.hidden_channels = hidden_channels
|
125 |
+
self.kernel_size = kernel_size
|
126 |
+
self.dilation_rate = dilation_rate
|
127 |
+
self.n_layers = n_layers
|
128 |
+
self.n_flows = n_flows
|
129 |
+
self.gin_channels = gin_channels
|
130 |
+
|
131 |
+
self.flows = nn.ModuleList()
|
132 |
+
for i in range(n_flows):
|
133 |
+
self.flows.append(
|
134 |
+
modules.ResidualCouplingLayer(
|
135 |
+
channels,
|
136 |
+
hidden_channels,
|
137 |
+
kernel_size,
|
138 |
+
dilation_rate,
|
139 |
+
n_layers,
|
140 |
+
gin_channels=gin_channels,
|
141 |
+
mean_only=True,
|
142 |
+
)
|
143 |
+
)
|
144 |
+
self.flows.append(modules.Flip())
|
145 |
+
|
146 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
147 |
+
if not reverse:
|
148 |
+
for flow in self.flows:
|
149 |
+
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
150 |
+
else:
|
151 |
+
for flow in reversed(self.flows):
|
152 |
+
x = flow(x, x_mask, g=g, reverse=reverse)
|
153 |
+
return x
|
154 |
+
|
155 |
+
def remove_weight_norm(self):
|
156 |
+
for i in range(self.n_flows):
|
157 |
+
self.flows[i * 2].remove_weight_norm()
|
158 |
+
|
159 |
+
|
160 |
+
class PosteriorEncoder(nn.Module):
|
161 |
+
def __init__(
|
162 |
+
self,
|
163 |
+
in_channels,
|
164 |
+
out_channels,
|
165 |
+
hidden_channels,
|
166 |
+
kernel_size,
|
167 |
+
dilation_rate,
|
168 |
+
n_layers,
|
169 |
+
gin_channels=0,
|
170 |
+
):
|
171 |
+
super().__init__()
|
172 |
+
self.in_channels = in_channels
|
173 |
+
self.out_channels = out_channels
|
174 |
+
self.hidden_channels = hidden_channels
|
175 |
+
self.kernel_size = kernel_size
|
176 |
+
self.dilation_rate = dilation_rate
|
177 |
+
self.n_layers = n_layers
|
178 |
+
self.gin_channels = gin_channels
|
179 |
+
|
180 |
+
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
181 |
+
self.enc = modules.WN(
|
182 |
+
hidden_channels,
|
183 |
+
kernel_size,
|
184 |
+
dilation_rate,
|
185 |
+
n_layers,
|
186 |
+
gin_channels=gin_channels,
|
187 |
+
)
|
188 |
+
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
189 |
+
|
190 |
+
def forward(self, x, x_lengths, g=None):
|
191 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
|
192 |
+
x.dtype
|
193 |
+
)
|
194 |
+
x = self.pre(x) * x_mask
|
195 |
+
x = self.enc(x, x_mask, g=g)
|
196 |
+
stats = self.proj(x) * x_mask
|
197 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
198 |
+
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
|
199 |
+
return z, m, logs, x_mask
|
200 |
+
|
201 |
+
def remove_weight_norm(self):
|
202 |
+
self.enc.remove_weight_norm()
|
203 |
+
|
204 |
+
|
205 |
+
class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()

class SineGen(torch.nn.Module):
    """Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0)
    """

    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate the unvoiced/voiced (uv) signal
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv

    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
        f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        with torch.no_grad():
            f0 = f0[:, None].transpose(1, 2)
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                    idx + 2
                )  # idx + 2: the (idx+1)-th overtone, i.e. the (idx+2)-th harmonic
            rad_values = (
                f0_buf / self.sampling_rate
            ) % 1  # the % 1 here means the harmonic products cannot be optimized away afterwards
            rand_ini = torch.rand(
                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
            )
            rand_ini[:, 0] = 0
            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
            tmp_over_one = torch.cumsum(
                rad_values, 1
            )  # applying % 1 here would prevent the later cumsum from being optimized
            tmp_over_one *= upp
            tmp_over_one = F.interpolate(
                tmp_over_one.transpose(2, 1),
                scale_factor=upp,
                mode="linear",
                align_corners=True,
            ).transpose(2, 1)
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            tmp_over_one %= 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sine_waves = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
            sine_waves = sine_waves * self.sine_amp
            uv = self._f02uv(f0)
            uv = F.interpolate(
                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            sine_waves = sine_waves * uv + noise
            return sine_waves, uv, noise

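SineGen builds each harmonic by accumulating per-sample phase increments (f0 / sampling_rate) and taking the sine of the running phase, which keeps the waveform continuous even as F0 changes. A minimal NumPy sketch of the same idea for a single harmonic, illustrative only, without the upsampling and phase-wrap bookkeeping above:

    import numpy as np

    sr = 40000
    f0 = np.full(sr, 220.0)                  # one second of a constant 220 Hz pitch
    phase = np.cumsum(f0 / sr)               # running phase in cycles
    sine = 0.1 * np.sin(2 * np.pi * phase)   # sine_amp = 0.1, as in SineGen
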
class SourceModuleHnNSF(torch.nn.Module):
    """SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshold=0)
    sampling_rate: sampling rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that the amplitude of noise in unvoiced regions is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(
        self,
        sampling_rate,
        harmonic_num=0,
        sine_amp=0.1,
        add_noise_std=0.003,
        voiced_threshold=0,
        is_half=True,
    ):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std
        self.is_half = is_half
        # to produce sine waveforms
        self.l_sin_gen = SineGen(
            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
        )

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x, upp=None):
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        if self.is_half:
            sine_wavs = sine_wavs.half()
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
        return sine_merge, None, None  # noise, uv

class GeneratorNSF(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels,
        sr,
        is_half=False,
    ):
        super(GeneratorNSF, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)

        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sr, harmonic_num=0, is_half=is_half
        )
        self.noise_convs = nn.ModuleList()
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            c_cur = upsample_initial_channel // (2 ** (i + 1))
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )
            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1 :])
                self.noise_convs.append(
                    Conv1d(
                        1,
                        c_cur,
                        kernel_size=stride_f0 * 2,
                        stride=stride_f0,
                        padding=stride_f0 // 2,
                    )
                )
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

        self.upp = np.prod(upsample_rates)

    def forward(self, x, f0, g=None):
        har_source, noi_source, uv = self.m_source(f0, self.upp)
        har_source = har_source.transpose(1, 2)
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            x_source = self.noise_convs[i](har_source)
            x = x + x_source
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()

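GeneratorNSF upsamples features by np.prod(upsample_rates) overall, so the sine excitation produced at audio rate has to be strided back down to match each intermediate resolution; that is exactly what the noise_convs strides, np.prod(upsample_rates[i + 1:]), accomplish. A quick check of the arithmetic, where the [10, 10, 2, 2] rates are an assumption based on common RVC 40k configs:

    import numpy as np

    upsample_rates = [10, 10, 2, 2]   # assumed 40k config
    upp = np.prod(upsample_rates)     # 400 audio samples per feature frame
    # stride mapping audio-rate excitation down to the i-th feature resolution
    strides = [np.prod(upsample_rates[i + 1:]) for i in range(len(upsample_rates) - 1)]
    print(upp, strides)               # 400 [40, 4, 2]
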
sr2sr = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}

class SynthesizerTrnMsNSFsidM(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        version,
        **kwargs
    ):
        super().__init__()
        if isinstance(sr, str):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length
        self.spk_embed_dim = spk_embed_dim
        if version == "v1":
            self.enc_p = TextEncoder256(
                inter_channels,
                hidden_channels,
                filter_channels,
                n_heads,
                n_layers,
                kernel_size,
                p_dropout,
            )
        else:
            self.enc_p = TextEncoder768(
                inter_channels,
                hidden_channels,
                filter_channels,
                n_heads,
                n_layers,
                kernel_size,
                p_dropout,
            )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        self.speaker_map = None
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def construct_spkmixmap(self, n_speaker):
        self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
        for i in range(n_speaker):
            self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
        self.speaker_map = self.speaker_map.unsqueeze(0)

    def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
        if self.speaker_map is not None:  # [N, S] * [S, B, 1, H]
            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
            g = g * self.speaker_map  # [N, S, B, 1, H]
            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
        else:
            g = g.unsqueeze(0)
            g = self.emb_g(g).transpose(1, 2)

        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
        return o

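construct_spkmixmap caches every speaker embedding so that, at inference, g can act as a mixing weight over speakers rather than a single speaker id; the forward pass then blends embeddings as a weighted sum. A minimal standalone sketch of that blend, with illustrative shapes and values:

    import torch

    n_speaker, gin_channels = 4, 8
    speaker_map = torch.randn(n_speaker, gin_channels)  # one embedding per speaker
    weights = torch.tensor([0.5, 0.5, 0.0, 0.0])        # mix of speakers 0 and 1
    g = (weights[:, None] * speaker_map).sum(dim=0)     # blended conditioning vector
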
class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11, 17]
        # periods = [3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs

class MultiPeriodDiscriminatorV2(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminatorV2, self).__init__()
        # periods = [2, 3, 5, 7, 11, 17]
        periods = [2, 3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs

class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap

class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
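
Each period discriminator folds the 1-D waveform into a 2-D grid whose second axis has length period, so its 2-D convolutions compare samples that sit exactly one period apart. A standalone sketch of that fold:

    import torch
    import torch.nn.functional as F

    period = 3
    x = torch.randn(1, 1, 100)                      # (batch, channels, samples)
    n_pad = (period - x.shape[-1] % period) % period
    x = F.pad(x, (0, n_pad), "reflect")             # pad length to a multiple of period
    x2d = x.view(1, 1, -1, period)                  # rows advance by one full period
    print(x2d.shape)                                # torch.Size([1, 1, 34, 3])
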
lib/infer_pack/modules.py
ADDED
@@ -0,0 +1,522 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

from lib.infer_pack import commons
from lib.infer_pack.commons import init_weights, get_padding
from lib.infer_pack.transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1

class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(
        self,
        in_channels,
        hidden_channels,
        out_channels,
        kernel_size,
        n_layers,
        p_dropout,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(
            nn.Conv1d(
                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
            )
        )
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                nn.Conv1d(
                    hidden_channels,
                    hidden_channels,
                    kernel_size,
                    padding=kernel_size // 2,
                )
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """

    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size**i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    groups=channels,
                    dilation=dilation,
                    padding=padding,
                )
            )
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask

class WN(torch.nn.Module):
    def __init__(
        self,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
        p_dropout=0,
    ):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = (kernel_size,)
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(
                gin_channels, 2 * hidden_channels * n_layers, 1
            )
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")

        for i in range(n_layers):
            dilation = dilation_rate**i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(
                hidden_channels,
                2 * hidden_channels,
                kernel_size,
                dilation=dilation,
                padding=padding,
            )
            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, : self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels :, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)

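WN's per-layer nonlinearity is the WaveNet gated activation: the 2 * hidden_channels pre-activation is split in half, one half goes through tanh (content) and the other through sigmoid (gate), and the two are multiplied. A standalone sketch of what commons.fused_add_tanh_sigmoid_multiply computes (the helper itself lives in lib/infer_pack/commons.py):

    import torch

    hidden = 4
    x_in = torch.randn(1, 2 * hidden, 10)  # conv output, double width
    g_l = torch.zeros_like(x_in)           # conditioning slice (zeros when g is None)
    s = x_in + g_l
    acts = torch.tanh(s[:, :hidden, :]) * torch.sigmoid(s[:, hidden:, :])
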
class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)

class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)

class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x

class ResidualCouplingLayer(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        p_dropout=0,
        gin_channels=0,
        mean_only=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            p_dropout=p_dropout,
            gin_channels=gin_channels,
        )
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()

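Because each coupling layer leaves x0 untouched and applies an affine map to x1 whose parameters depend only on x0, running forward and then reverse recovers the input exactly; this is what lets the synthesizer invert the flow at inference time. A quick round-trip check, a sketch assuming this modules.py is importable as lib.infer_pack.modules:

    import torch
    from lib.infer_pack.modules import ResidualCouplingLayer

    layer = ResidualCouplingLayer(
        channels=4, hidden_channels=8, kernel_size=5, dilation_rate=1, n_layers=2
    )
    x = torch.randn(1, 4, 20)
    x_mask = torch.ones(1, 1, 20)
    y, logdet = layer(x, x_mask)                # forward direction
    x_rec = layer(y, x_mask, reverse=True)      # inverse direction
    print(torch.allclose(x, x_rec, atol=1e-5))  # True
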
class ConvFlow(nn.Module):
    def __init__(
        self,
        in_channels,
        filter_channels,
        kernel_size,
        n_layers,
        num_bins=10,
        tail_bound=5.0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
        self.proj = nn.Conv1d(
            filter_channels, self.half_channels * (num_bins * 3 - 1), 1
        )
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
            self.filter_channels
        )
        unnormalized_derivatives = h[..., 2 * self.num_bins :]

        x1, logabsdet = piecewise_rational_quadratic_transform(
            x1,
            unnormalized_widths,
            unnormalized_heights,
            unnormalized_derivatives,
            inverse=reverse,
            tails="linear",
            tail_bound=self.tail_bound,
        )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x

lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
ADDED
@@ -0,0 +1,90 @@
from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
import pyworld
import numpy as np


class DioF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate F0 across unvoiced frames
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # possibly an unnecessary copy
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def resize_f0(self, x, target_len):
        source = np.array(x)
        source[source < 0.001] = np.nan
        target = np.interp(
            np.arange(0, len(source) * target_len, len(source)) / target_len,
            np.arange(0, len(source)),
            source,
        )
        res = np.nan_to_num(target)
        return res

    def compute_f0(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.dio(
            wav.astype(np.double),
            fs=self.sampling_rate,
            f0_floor=self.f0_min,
            f0_ceil=self.f0_max,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
        for index, pitch in enumerate(f0):
            f0[index] = round(pitch, 1)
        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]

    def compute_f0_uv(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.dio(
            wav.astype(np.double),
            fs=self.sampling_rate,
            f0_floor=self.f0_min,
            f0_ceil=self.f0_max,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
        for index, pitch in enumerate(f0):
            f0[index] = round(pitch, 1)
        return self.interpolate_f0(self.resize_f0(f0, p_len))

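All three predictors share the same interface: compute_f0 returns one interpolated F0 value per hop, and compute_f0_uv additionally returns voiced/unvoiced flags. A usage sketch, where the sine input is just a stand-in for real speech:

    import numpy as np
    from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

    sr = 44100
    t = np.arange(sr, dtype=np.float64) / sr
    wav = np.sin(2 * np.pi * 220.0 * t)   # one second of a 220 Hz tone

    predictor = DioF0Predictor(hop_length=512, sampling_rate=sr)
    f0, uv = predictor.compute_f0_uv(wav)
    print(f0.shape, f0[uv > 0].mean())    # (86,) and roughly 220.0
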
lib/infer_pack/modules/F0Predictor/F0Predictor.py
ADDED
@@ -0,0 +1,16 @@
class F0Predictor(object):
    def compute_f0(self, wav, p_len):
        """
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length]
        """
        pass

    def compute_f0_uv(self, wav, p_len):
        """
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
        """
        pass

lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
ADDED
@@ -0,0 +1,86 @@
from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
import pyworld
import numpy as np


class HarvestF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate F0 across unvoiced frames
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # possibly an unnecessary copy
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def resize_f0(self, x, target_len):
        source = np.array(x)
        source[source < 0.001] = np.nan
        target = np.interp(
            np.arange(0, len(source) * target_len, len(source)) / target_len,
            np.arange(0, len(source)),
            source,
        )
        res = np.nan_to_num(target)
        return res

    def compute_f0(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.harvest(
            wav.astype(np.double),
            fs=self.sampling_rate,  # fs is the audio sampling rate, not the hop length
            f0_ceil=self.f0_max,
            f0_floor=self.f0_min,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]

    def compute_f0_uv(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.harvest(
            wav.astype(np.double),
            fs=self.sampling_rate,
            f0_floor=self.f0_min,
            f0_ceil=self.f0_max,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
        return self.interpolate_f0(self.resize_f0(f0, p_len))

lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
ADDED
@@ -0,0 +1,97 @@
from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
import parselmouth
import numpy as np


class PMF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate F0 across unvoiced frames
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # possibly an unnecessary copy
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def compute_f0(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0

    def compute_f0_uv(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0, uv

lib/infer_pack/modules/F0Predictor/__init__.py
ADDED
@@ -0,0 +1 @@
lib/infer_pack/onnx_inference.py
ADDED
@@ -0,0 +1,145 @@
import onnxruntime
import librosa
import numpy as np
import soundfile


class ContentVec:
    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
        print("load model(s) from {}".format(vec_path))
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported device")
        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)

    def __call__(self, wav):
        return self.forward(wav)

    def forward(self, wav):
        feats = wav
        if feats.ndim == 2:  # stereo: average the two channels
            feats = feats.mean(-1)
        assert feats.ndim == 1, feats.ndim
        feats = np.expand_dims(np.expand_dims(feats, 0), 0)
        onnx_input = {self.model.get_inputs()[0].name: feats}
        logits = self.model.run(None, onnx_input)[0]
        return logits.transpose(0, 2, 1)


def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
    if f0_predictor == "pm":
        from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

        f0_predictor_object = PMF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    elif f0_predictor == "harvest":
        from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
            HarvestF0Predictor,
        )

        f0_predictor_object = HarvestF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    elif f0_predictor == "dio":
        from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

        f0_predictor_object = DioF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    else:
        raise Exception("Unknown f0 predictor")
    return f0_predictor_object


class OnnxRVC:
    def __init__(
        self,
        model_path,
        sr=40000,
        hop_size=512,
        vec_path="vec-768-layer-12",
        device="cpu",
    ):
        vec_path = f"pretrained/{vec_path}.onnx"
        self.vec_model = ContentVec(vec_path, device)
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported device")
        self.model = onnxruntime.InferenceSession(model_path, providers=providers)
        self.sampling_rate = sr
        self.hop_size = hop_size

    def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
        onnx_input = {
            self.model.get_inputs()[0].name: hubert,
            self.model.get_inputs()[1].name: hubert_length,
            self.model.get_inputs()[2].name: pitch,
            self.model.get_inputs()[3].name: pitchf,
            self.model.get_inputs()[4].name: ds,
            self.model.get_inputs()[5].name: rnd,
        }
        return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)

    def inference(
        self,
        raw_path,
        sid,
        f0_method="dio",
        f0_up_key=0,
        pad_time=0.5,
        cr_threshold=0.02,
    ):
        f0_min = 50
        f0_max = 1100
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        f0_predictor = get_f0_predictor(
            f0_method,
            hop_length=self.hop_size,
            sampling_rate=self.sampling_rate,
            threshold=cr_threshold,
        )
        wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
        org_length = len(wav)
        if org_length / sr > 50.0:
            raise RuntimeError("Reached Max Length")

        wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)

        hubert = self.vec_model(wav16k)
        hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
        hubert_length = hubert.shape[1]

        pitchf = f0_predictor.compute_f0(wav, hubert_length)
        pitchf = pitchf * 2 ** (f0_up_key / 12)
        pitch = pitchf.copy()
        f0_mel = 1127 * np.log(1 + pitch / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        pitch = np.rint(f0_mel).astype(np.int64)

        pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
        pitch = pitch.reshape(1, len(pitch))
        ds = np.array([sid]).astype(np.int64)

        rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
        hubert_length = np.array([hubert_length]).astype(np.int64)

        out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
        out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
        return out_wav[0:org_length]

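End to end, OnnxRVC loads both the ContentVec feature extractor and the exported RVC voice model, then converts a clip of at most 50 seconds in one call. A usage sketch, where the model and audio paths are placeholders:

    import soundfile

    from lib.infer_pack.onnx_inference import OnnxRVC

    model = OnnxRVC(
        "pretrained/my_voice.onnx",  # placeholder path to an exported RVC model
        sr=40000,
        hop_size=512,
        device="cpu",
    )
    audio = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
    soundfile.write("output.wav", audio, 40000)
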
lib/infer_pack/transforms.py
ADDED
@@ -0,0 +1,209 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {"tails": tails, "tail_bound": tail_bound}

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1


def unconstrained_rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == "linear":
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError("{} tails are not implemented.".format(tails))

    (
        outputs[inside_interval_mask],
        logabsdet[inside_interval_mask],
    ) = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )

    return outputs, logabsdet


def rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError("Input to a transform is not within its domain")

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
|
128 |
+
cumwidths = (right - left) * cumwidths + left
|
129 |
+
cumwidths[..., 0] = left
|
130 |
+
cumwidths[..., -1] = right
|
131 |
+
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
132 |
+
|
133 |
+
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
134 |
+
|
135 |
+
heights = F.softmax(unnormalized_heights, dim=-1)
|
136 |
+
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
137 |
+
cumheights = torch.cumsum(heights, dim=-1)
|
138 |
+
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
|
139 |
+
cumheights = (top - bottom) * cumheights + bottom
|
140 |
+
cumheights[..., 0] = bottom
|
141 |
+
cumheights[..., -1] = top
|
142 |
+
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
143 |
+
|
144 |
+
if inverse:
|
145 |
+
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
146 |
+
else:
|
147 |
+
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
148 |
+
|
149 |
+
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
150 |
+
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
151 |
+
|
152 |
+
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
153 |
+
delta = heights / widths
|
154 |
+
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
155 |
+
|
156 |
+
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
157 |
+
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
158 |
+
|
159 |
+
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
160 |
+
|
161 |
+
if inverse:
|
162 |
+
a = (inputs - input_cumheights) * (
|
163 |
+
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
164 |
+
) + input_heights * (input_delta - input_derivatives)
|
165 |
+
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
|
166 |
+
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
167 |
+
)
|
168 |
+
c = -input_delta * (inputs - input_cumheights)
|
169 |
+
|
170 |
+
discriminant = b.pow(2) - 4 * a * c
|
171 |
+
assert (discriminant >= 0).all()
|
172 |
+
|
173 |
+
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
174 |
+
outputs = root * input_bin_widths + input_cumwidths
|
175 |
+
|
176 |
+
theta_one_minus_theta = root * (1 - root)
|
177 |
+
denominator = input_delta + (
|
178 |
+
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
179 |
+
* theta_one_minus_theta
|
180 |
+
)
|
181 |
+
derivative_numerator = input_delta.pow(2) * (
|
182 |
+
input_derivatives_plus_one * root.pow(2)
|
183 |
+
+ 2 * input_delta * theta_one_minus_theta
|
184 |
+
+ input_derivatives * (1 - root).pow(2)
|
185 |
+
)
|
186 |
+
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
187 |
+
|
188 |
+
return outputs, -logabsdet
|
189 |
+
else:
|
190 |
+
theta = (inputs - input_cumwidths) / input_bin_widths
|
191 |
+
theta_one_minus_theta = theta * (1 - theta)
|
192 |
+
|
193 |
+
numerator = input_heights * (
|
194 |
+
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
|
195 |
+
)
|
196 |
+
denominator = input_delta + (
|
197 |
+
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
198 |
+
* theta_one_minus_theta
|
199 |
+
)
|
200 |
+
outputs = input_cumheights + numerator / denominator
|
201 |
+
|
202 |
+
derivative_numerator = input_delta.pow(2) * (
|
203 |
+
input_derivatives_plus_one * theta.pow(2)
|
204 |
+
+ 2 * input_delta * theta_one_minus_theta
|
205 |
+
+ input_derivatives * (1 - theta).pow(2)
|
206 |
+
)
|
207 |
+
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
208 |
+
|
209 |
+
return outputs, logabsdet
|
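transforms.py is the standard piecewise rational-quadratic spline used by VITS-style flows: inside [-tail_bound, tail_bound] inputs pass through a monotone spline parameterized by the unnormalized widths, heights, and derivatives; outside, they pass through unchanged ("linear" tails); the log-absolute-determinant is returned alongside the outputs, and inverse=True solves the quadratic for the pre-image. A minimal round-trip sketch under assumed shapes (batch 2, length 5, 8 bins; the derivative tensor carries num_bins - 1 interior values because the linear tails pad on the two boundary derivatives), run from the repo root:

import torch
from lib.infer_pack.transforms import piecewise_rational_quadratic_transform

B, T, K = 2, 5, 8                  # hypothetical batch, length, and bin count
x = torch.rand(B, T) * 2 - 1       # inputs inside the default [-1, 1] tail bound
w = torch.randn(B, T, K)           # unnormalized bin widths
h = torch.randn(B, T, K)           # unnormalized bin heights
d = torch.randn(B, T, K - 1)       # interior derivatives only; see note above

y, logdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails="linear"
)
x_rec, inv_logdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear"
)
print(torch.allclose(x, x_rec, atol=1e-5))             # True: the spline is bijective
print(torch.allclose(logdet, -inv_logdet, atol=1e-5))  # inverse log-det is the negative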
requirements.txt
ADDED
@@ -0,0 +1,19 @@
wheel
setuptools
ffmpeg
numba==0.56.4
numpy==1.23.5
scipy==1.9.3
librosa==0.9.2
fairseq==0.12.2
faiss-cpu==1.7.3
gradio==3.40.1
gradio-client==0.8.1
soundfile>=0.12.1
praat-parselmouth>=0.4.2
httpx==0.23.0
tensorboard
tensorboardX
torchcrepe
onnxruntime
pyOpenSSL==24.0.0
rmvpe.py
ADDED
@@ -0,0 +1,432 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from librosa.filters import mel
from time import time as ttime  # used by the commented-out timing code below

# Implied by the GRU branch of E2E below (3 * N_MELS input features, N_CLASS
# pitch bins); these constants were referenced but never defined in the
# original file, which would have crashed the n_gru == 0 path.
N_MELS = 128
N_CLASS = 360


class BiGRU(nn.Module):
    def __init__(self, input_features, hidden_features, num_layers):
        super(BiGRU, self).__init__()
        self.gru = nn.GRU(
            input_features,
            hidden_features,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
        )

    def forward(self, x):
        return self.gru(x)[0]


class ConvBlockRes(nn.Module):
    def __init__(self, in_channels, out_channels, momentum=0.01):
        super(ConvBlockRes, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
                bias=False,
            ),
            nn.BatchNorm2d(out_channels, momentum=momentum),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
                bias=False,
            ),
            nn.BatchNorm2d(out_channels, momentum=momentum),
            nn.ReLU(),
        )
        if in_channels != out_channels:
            self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
            self.is_shortcut = True
        else:
            self.is_shortcut = False

    def forward(self, x):
        if self.is_shortcut:
            return self.conv(x) + self.shortcut(x)
        else:
            return self.conv(x) + x


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels,
        in_size,
        n_encoders,
        kernel_size,
        n_blocks,
        out_channels=16,
        momentum=0.01,
    ):
        super(Encoder, self).__init__()
        self.n_encoders = n_encoders
        self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
        self.layers = nn.ModuleList()
        self.latent_channels = []
        for i in range(self.n_encoders):
            self.layers.append(
                ResEncoderBlock(
                    in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
                )
            )
            self.latent_channels.append([out_channels, in_size])
            in_channels = out_channels
            out_channels *= 2
            in_size //= 2
        self.out_size = in_size
        self.out_channel = out_channels

    def forward(self, x):
        concat_tensors = []
        x = self.bn(x)
        for i in range(self.n_encoders):
            _, x = self.layers[i](x)  # pre-pool tensor is kept as the skip connection
            concat_tensors.append(_)
        return x, concat_tensors


class ResEncoderBlock(nn.Module):
    def __init__(
        self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
    ):
        super(ResEncoderBlock, self).__init__()
        self.n_blocks = n_blocks
        self.conv = nn.ModuleList()
        self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
        for i in range(n_blocks - 1):
            self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
        self.kernel_size = kernel_size
        if self.kernel_size is not None:
            self.pool = nn.AvgPool2d(kernel_size=kernel_size)

    def forward(self, x):
        for i in range(self.n_blocks):
            x = self.conv[i](x)
        if self.kernel_size is not None:
            return x, self.pool(x)
        else:
            return x


class Intermediate(nn.Module):
    def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
        super(Intermediate, self).__init__()
        self.n_inters = n_inters
        self.layers = nn.ModuleList()
        self.layers.append(
            ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
        )
        for i in range(self.n_inters - 1):
            self.layers.append(
                ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
            )

    def forward(self, x):
        for i in range(self.n_inters):
            x = self.layers[i](x)
        return x


class ResDecoderBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
        super(ResDecoderBlock, self).__init__()
        out_padding = (0, 1) if stride == (1, 2) else (1, 1)
        self.n_blocks = n_blocks
        self.conv1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(3, 3),
                stride=stride,
                padding=(1, 1),
                output_padding=out_padding,
                bias=False,
            ),
            nn.BatchNorm2d(out_channels, momentum=momentum),
            nn.ReLU(),
        )
        self.conv2 = nn.ModuleList()
        self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
        for i in range(n_blocks - 1):
            self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))

    def forward(self, x, concat_tensor):
        x = self.conv1(x)
        x = torch.cat((x, concat_tensor), dim=1)
        for i in range(self.n_blocks):
            x = self.conv2[i](x)
        return x


class Decoder(nn.Module):
    def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList()
        self.n_decoders = n_decoders
        for i in range(self.n_decoders):
            out_channels = in_channels // 2
            self.layers.append(
                ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
            )
            in_channels = out_channels

    def forward(self, x, concat_tensors):
        for i in range(self.n_decoders):
            x = self.layers[i](x, concat_tensors[-1 - i])
        return x


class DeepUnet(nn.Module):
    def __init__(
        self,
        kernel_size,
        n_blocks,
        en_de_layers=5,
        inter_layers=4,
        in_channels=1,
        en_out_channels=16,
    ):
        super(DeepUnet, self).__init__()
        self.encoder = Encoder(
            in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
        )
        self.intermediate = Intermediate(
            self.encoder.out_channel // 2,
            self.encoder.out_channel,
            inter_layers,
            n_blocks,
        )
        self.decoder = Decoder(
            self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
        )

    def forward(self, x):
        x, concat_tensors = self.encoder(x)
        x = self.intermediate(x)
        x = self.decoder(x, concat_tensors)
        return x


class E2E(nn.Module):
    def __init__(
        self,
        n_blocks,
        n_gru,
        kernel_size,
        en_de_layers=5,
        inter_layers=4,
        in_channels=1,
        en_out_channels=16,
    ):
        super(E2E, self).__init__()
        self.unet = DeepUnet(
            kernel_size,
            n_blocks,
            en_de_layers,
            inter_layers,
            in_channels,
            en_out_channels,
        )
        self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
        if n_gru:
            self.fc = nn.Sequential(
                BiGRU(3 * N_MELS, 256, n_gru),
                nn.Linear(512, N_CLASS),
                nn.Dropout(0.25),
                nn.Sigmoid(),
            )
        else:
            self.fc = nn.Sequential(
                nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
            )

    def forward(self, mel):
        mel = mel.transpose(-1, -2).unsqueeze(1)
        x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
        x = self.fc(x)
        return x


class MelSpectrogram(torch.nn.Module):
    def __init__(
        self,
        is_half,
        n_mel_channels,
        sampling_rate,
        win_length,
        hop_length,
        n_fft=None,
        mel_fmin=0,
        mel_fmax=None,
        clamp=1e-5,
    ):
        super().__init__()
        n_fft = win_length if n_fft is None else n_fft
        self.hann_window = {}
        mel_basis = mel(
            sr=sampling_rate,
            n_fft=n_fft,
            n_mels=n_mel_channels,
            fmin=mel_fmin,
            fmax=mel_fmax,
            htk=True,
        )
        mel_basis = torch.from_numpy(mel_basis).float()
        self.register_buffer("mel_basis", mel_basis)
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.sampling_rate = sampling_rate
        self.n_mel_channels = n_mel_channels
        self.clamp = clamp
        self.is_half = is_half

    def forward(self, audio, keyshift=0, speed=1, center=True):
        factor = 2 ** (keyshift / 12)
        n_fft_new = int(np.round(self.n_fft * factor))
        win_length_new = int(np.round(self.win_length * factor))
        hop_length_new = int(np.round(self.hop_length * speed))
        keyshift_key = str(keyshift) + "_" + str(audio.device)
        if keyshift_key not in self.hann_window:
            self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
                audio.device
            )
        fft = torch.stft(
            audio,
            n_fft=n_fft_new,
            hop_length=hop_length_new,
            win_length=win_length_new,
            window=self.hann_window[keyshift_key],
            center=center,
            return_complex=True,
        )
        magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
        if keyshift != 0:
            size = self.n_fft // 2 + 1
            resize = magnitude.size(1)
            if resize < size:
                magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
            magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
        mel_output = torch.matmul(self.mel_basis, magnitude)
        if self.is_half:
            mel_output = mel_output.half()
        log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
        return log_mel_spec


class RMVPE:
    def __init__(self, model_path, is_half, device=None):
        self.resample_kernel = {}
        model = E2E(4, 1, (2, 2))
        ckpt = torch.load(model_path, map_location="cpu")
        model.load_state_dict(ckpt)
        model.eval()
        if is_half:
            model = model.half()
        self.model = model
        self.is_half = is_half
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        self.mel_extractor = MelSpectrogram(
            is_half, 128, 16000, 1024, 160, None, 30, 8000
        ).to(device)
        self.model = self.model.to(device)
        cents_mapping = 20 * np.arange(360) + 1997.3794084376191
        self.cents_mapping = np.pad(cents_mapping, (4, 4))  # 368

    def mel2hidden(self, mel):
        with torch.no_grad():
            n_frames = mel.shape[-1]
            # pad the frame axis up to a multiple of 32 for the U-net strides
            mel = F.pad(
                mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
            )
            hidden = self.model(mel)
            return hidden[:, :n_frames]

    def decode(self, hidden, thred=0.03):
        cents_pred = self.to_local_average_cents(hidden, thred=thred)
        f0 = 10 * (2 ** (cents_pred / 1200))
        f0[f0 == 10] = 0  # cents == 0 marks unvoiced frames
        # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
        return f0

    def infer_from_audio(self, audio, thred=0.03):
        audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
        # torch.cuda.synchronize()
        # t0 = ttime()
        mel = self.mel_extractor(audio, center=True)
        # torch.cuda.synchronize()
        # t1 = ttime()
        hidden = self.mel2hidden(mel)
        # torch.cuda.synchronize()
        # t2 = ttime()
        hidden = hidden.squeeze(0).cpu().numpy()
        if self.is_half:
            hidden = hidden.astype("float32")
        f0 = self.decode(hidden, thred=thred)
        # torch.cuda.synchronize()
        # t3 = ttime()
        # print("rmvpe:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t3 - t0))
        return f0

    def to_local_average_cents(self, salience, thred=0.05):
        # t0 = ttime()
        center = np.argmax(salience, axis=1)  # argmax bin per frame: (n_frames,)
        salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
        # t1 = ttime()
        center += 4
        todo_salience = []
        todo_cents_mapping = []
        starts = center - 4
        ends = center + 5
        for idx in range(salience.shape[0]):
            todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
            todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
        # t2 = ttime()
        todo_salience = np.array(todo_salience)  # (n_frames, 9)
        todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
        product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
        weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
        divided = product_sum / weight_sum  # weighted-average cents per frame
        # t3 = ttime()
        maxx = np.max(salience, axis=1)  # (n_frames,)
        divided[maxx <= thred] = 0
        # t4 = ttime()
        # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
        return divided


# if __name__ == '__main__':
#     audio, sampling_rate = sf.read("卢本伟语录~1.wav")
#     if len(audio.shape) > 1:
#         audio = librosa.to_mono(audio.transpose(1, 0))
#     audio_bak = audio.copy()
#     if sampling_rate != 16000:
#         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
#     model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
#     thred = 0.03  # 0.01
#     device = 'cuda' if torch.cuda.is_available() else 'cpu'
#     rmvpe = RMVPE(model_path, is_half=False, device=device)
#     t0 = ttime()
#     f0 = rmvpe.infer_from_audio(audio, thred=thred)
#     f0 = rmvpe.infer_from_audio(audio, thred=thred)
#     f0 = rmvpe.infer_from_audio(audio, thred=thred)
#     f0 = rmvpe.infer_from_audio(audio, thred=thred)
#     f0 = rmvpe.infer_from_audio(audio, thred=thred)
#     t1 = ttime()
#     print(f0.shape, t1 - t0)
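A minimal inference sketch for the RMVPE class above. The checkpoint name rmvpe.pt is a placeholder (the weights are fetched separately by the Space), and the extractor expects 16 kHz mono float audio; the hop of 160 samples at 16 kHz gives one F0 value per 10 ms frame:

import librosa
from rmvpe import RMVPE

rmvpe = RMVPE("rmvpe.pt", is_half=False, device="cpu")  # hypothetical weight file
audio, _ = librosa.load("input.wav", sr=16000)          # 16 kHz mono, float32
f0 = rmvpe.infer_from_audio(audio, thred=0.03)          # Hz per 10 ms frame; 0 = unvoiced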
uvr5/lib/lib_v5/layers_123821KB.py
ADDED
@@ -0,0 +1,118 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
        bottle = self.bottleneck(out)
        return bottle
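The ASPP block above fuses a globally pooled frequency path (conv1), a 1x1 path (conv2), and three dilated depthwise-separable paths, all of which preserve spatial size, through a 1x1 bottleneck. A quick shape check with made-up sizes, run from the Space root so the package-relative imports resolve:

import torch
from uvr5.lib.lib_v5 import layers_123821KB as layers

aspp = layers.ASPPModule(nin=64, nout=128)
x = torch.rand(1, 64, 32, 100)  # (batch, channels, freq bins, frames) - arbitrary sizes
y = aspp(x)                     # five branches concatenated, then 1x1 bottleneck
print(y.shape)                  # expected: torch.Size([1, 128, 32, 100])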
uvr5/lib/lib_v5/model_param_init.py
ADDED
@@ -0,0 +1,69 @@
import json
import os
import pathlib

default_param = {}
default_param["bins"] = 768
default_param["unstable_bins"] = 9  # training only
default_param["reduction_bins"] = 762  # training only
default_param["sr"] = 44100
default_param["pre_filter_start"] = 757
default_param["pre_filter_stop"] = 768
default_param["band"] = {}


default_param["band"][1] = {
    "sr": 11025,
    "hl": 128,
    "n_fft": 960,
    "crop_start": 0,
    "crop_stop": 245,
    "lpf_start": 61,  # inference only
    "res_type": "polyphase",
}

default_param["band"][2] = {
    "sr": 44100,
    "hl": 512,
    "n_fft": 1536,
    "crop_start": 24,
    "crop_stop": 547,
    "hpf_start": 81,  # inference only
    "res_type": "sinc_best",
}


def int_keys(d):
    r = {}
    for k, v in d:
        if k.isdigit():
            k = int(k)
        r[k] = v
    return r


class ModelParameters(object):
    def __init__(self, config_path=""):
        if ".pth" == pathlib.Path(config_path).suffix:
            import zipfile

            with zipfile.ZipFile(config_path, "r") as zip:
                self.param = json.loads(
                    zip.read("param.json"), object_pairs_hook=int_keys
                )
        elif ".json" == pathlib.Path(config_path).suffix:
            with open(config_path, "r") as f:
                self.param = json.loads(f.read(), object_pairs_hook=int_keys)
        else:
            self.param = default_param

        for k in [
            "mid_side",
            "mid_side_b",
            "mid_side_b2",
            "stereo_w",
            "stereo_n",
            "reverse",
        ]:
            if k not in self.param:
                self.param[k] = False
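Note the int_keys hook above: JSON object keys are always strings, so it receives the (key, value) pairs from json's object_pairs_hook and converts digit keys such as "1" back to ints while non-numeric keys pass through untouched. A small illustration:

import json

from uvr5.lib.lib_v5.model_param_init import int_keys

d = json.loads('{"1": {"sr": 11025}, "bins": 768}', object_pairs_hook=int_keys)
print(d[1]["sr"], d["bins"])  # 11025 768: the band key is an int, "bins" stays a string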
uvr5/lib/lib_v5/modelparams/4band_v2.json
ADDED
@@ -0,0 +1,54 @@
{
  "bins": 672,
  "unstable_bins": 8,
  "reduction_bins": 637,
  "band": {
    "1": {
      "sr": 7350,
      "hl": 80,
      "n_fft": 640,
      "crop_start": 0,
      "crop_stop": 85,
      "lpf_start": 25,
      "lpf_stop": 53,
      "res_type": "polyphase"
    },
    "2": {
      "sr": 7350,
      "hl": 80,
      "n_fft": 320,
      "crop_start": 4,
      "crop_stop": 87,
      "hpf_start": 25,
      "hpf_stop": 12,
      "lpf_start": 31,
      "lpf_stop": 62,
      "res_type": "polyphase"
    },
    "3": {
      "sr": 14700,
      "hl": 160,
      "n_fft": 512,
      "crop_start": 17,
      "crop_stop": 216,
      "hpf_start": 48,
      "hpf_stop": 24,
      "lpf_start": 139,
      "lpf_stop": 210,
      "res_type": "polyphase"
    },
    "4": {
      "sr": 44100,
      "hl": 480,
      "n_fft": 960,
      "crop_start": 78,
      "crop_stop": 383,
      "hpf_start": 130,
      "hpf_stop": 86,
      "res_type": "kaiser_fast"
    }
  },
  "sr": 44100,
  "pre_filter_start": 668,
  "pre_filter_stop": 672
}
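This config describes the four analysis bands (7.35 kHz up to 44.1 kHz, with high/low-pass crossover bins) consumed by ModelParameters from the file above. A short load check, run from the Space root:

from uvr5.lib.lib_v5.model_param_init import ModelParameters

mp = ModelParameters("uvr5/lib/lib_v5/modelparams/4band_v2.json")
print(mp.param["bins"])          # 672
print(sorted(mp.param["band"]))  # [1, 2, 3, 4]: digit keys became ints via int_keys
print(mp.param["reverse"])       # False, filled in as a default flag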
uvr5/lib/lib_v5/nets_61968KB.py
ADDED
@@ -0,0 +1,122 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import layers_123821KB as layers


class BaseASPPNet(nn.Module):
    def __init__(self, nin, ch, dilations=(4, 8, 16)):
        super(BaseASPPNet, self).__init__()
        self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
        self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
        self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
        self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)

        self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)

        self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
        self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
        self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
        self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)

    def __call__(self, x):
        h, e1 = self.enc1(x)
        h, e2 = self.enc2(h)
        h, e3 = self.enc3(h)
        h, e4 = self.enc4(h)

        h = self.aspp(h)

        h = self.dec4(h, e4)
        h = self.dec3(h, e3)
        h = self.dec2(h, e2)
        h = self.dec1(h, e1)

        return h


class CascadedASPPNet(nn.Module):
    def __init__(self, n_fft):
        super(CascadedASPPNet, self).__init__()
        self.stg1_low_band_net = BaseASPPNet(2, 32)
        self.stg1_high_band_net = BaseASPPNet(2, 32)

        self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
        self.stg2_full_band_net = BaseASPPNet(16, 32)

        self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
        self.stg3_full_band_net = BaseASPPNet(32, 64)

        self.out = nn.Conv2d(64, 2, 1, bias=False)
        self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
        self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)

        self.max_bin = n_fft // 2
        self.output_bin = n_fft // 2 + 1

        self.offset = 128

    def forward(self, x, aggressiveness=None):
        mix = x.detach()
        x = x.clone()

        x = x[:, :, : self.max_bin]

        bandw = x.size()[2] // 2
        aux1 = torch.cat(
            [
                self.stg1_low_band_net(x[:, :, :bandw]),
                self.stg1_high_band_net(x[:, :, bandw:]),
            ],
            dim=2,
        )

        h = torch.cat([x, aux1], dim=1)
        aux2 = self.stg2_full_band_net(self.stg2_bridge(h))

        h = torch.cat([x, aux1, aux2], dim=1)
        h = self.stg3_full_band_net(self.stg3_bridge(h))

        mask = torch.sigmoid(self.out(h))
        mask = F.pad(
            input=mask,
            pad=(0, 0, 0, self.output_bin - mask.size()[2]),
            mode="replicate",
        )

        if self.training:
            aux1 = torch.sigmoid(self.aux1_out(aux1))
            aux1 = F.pad(
                input=aux1,
                pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
                mode="replicate",
            )
            aux2 = torch.sigmoid(self.aux2_out(aux2))
            aux2 = F.pad(
                input=aux2,
                pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
                mode="replicate",
            )
            return mask * mix, aux1 * mix, aux2 * mix
        else:
            if aggressiveness:
                mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
                    mask[:, :, : aggressiveness["split_bin"]],
                    1 + aggressiveness["value"] / 3,
                )
                mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
                    mask[:, :, aggressiveness["split_bin"] :],
                    1 + aggressiveness["value"],
                )

            return mask * mix

    def predict(self, x_mag, aggressiveness=None):
        h = self.forward(x_mag, aggressiveness)

        if self.offset > 0:
            h = h[:, :, :, self.offset : -self.offset]
            assert h.size()[3] > 0

        return h
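CascadedASPPNet runs stage 1 on the low and high halves of the magnitude spectrogram, refines the full band twice, and emits a sigmoid mask multiplied with the detached mix; predict() then trims self.offset = 128 frames from each side. A rough smoke test with arbitrary sizes, in eval mode so only the mask path runs (the shape noted below follows from the 4x down/upsampling and the 128-frame trim, not from anything asserted in the repo):

import torch
from uvr5.lib.lib_v5.nets_61968KB import CascadedASPPNet

net = CascadedASPPNet(n_fft=2048).eval()
x = torch.rand(1, 2, 1025, 512)  # (batch, stereo, n_fft // 2 + 1 bins, frames)
with torch.no_grad():
    out = net.predict(x)         # forward crops bins to max_bin, pads back to output_bin
print(out.shape)                 # torch.Size([1, 2, 1025, 256]): 128 frames cut per side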
uvr5/lib/lib_v5/spec_utils.py
ADDED
@@ -0,0 +1,672 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import hashlib
|
2 |
+
import json
|
3 |
+
import math
|
4 |
+
import os
|
5 |
+
|
6 |
+
import librosa
|
7 |
+
import numpy as np
|
8 |
+
import soundfile as sf
|
9 |
+
from tqdm import tqdm
|
10 |
+
|
11 |
+
|
12 |
+
def crop_center(h1, h2):
|
13 |
+
h1_shape = h1.size()
|
14 |
+
h2_shape = h2.size()
|
15 |
+
|
16 |
+
if h1_shape[3] == h2_shape[3]:
|
17 |
+
return h1
|
18 |
+
elif h1_shape[3] < h2_shape[3]:
|
19 |
+
raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
|
20 |
+
|
21 |
+
# s_freq = (h2_shape[2] - h1_shape[2]) // 2
|
22 |
+
# e_freq = s_freq + h1_shape[2]
|
23 |
+
s_time = (h1_shape[3] - h2_shape[3]) // 2
|
24 |
+
e_time = s_time + h2_shape[3]
|
25 |
+
h1 = h1[:, :, :, s_time:e_time]
|
26 |
+
|
27 |
+
return h1
|
28 |
+
|
29 |
+
|
30 |
+
def wave_to_spectrogram(
|
31 |
+
wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
|
32 |
+
):
|
33 |
+
if reverse:
|
34 |
+
wave_left = np.flip(np.asfortranarray(wave[0]))
|
35 |
+
wave_right = np.flip(np.asfortranarray(wave[1]))
|
36 |
+
elif mid_side:
|
37 |
+
wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
|
38 |
+
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
|
39 |
+
elif mid_side_b2:
|
40 |
+
wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
|
41 |
+
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
|
42 |
+
else:
|
43 |
+
wave_left = np.asfortranarray(wave[0])
|
44 |
+
wave_right = np.asfortranarray(wave[1])
|
45 |
+
|
46 |
+
spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
|
47 |
+
spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
|
48 |
+
|
49 |
+
spec = np.asfortranarray([spec_left, spec_right])
|
50 |
+
|
51 |
+
return spec
|
52 |
+
|
53 |
+
|
54 |
+
def wave_to_spectrogram_mt(
|
55 |
+
wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
|
56 |
+
):
|
57 |
+
import threading
|
58 |
+
|
59 |
+
if reverse:
|
60 |
+
wave_left = np.flip(np.asfortranarray(wave[0]))
|
61 |
+
wave_right = np.flip(np.asfortranarray(wave[1]))
|
62 |
+
elif mid_side:
|
63 |
+
wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
|
64 |
+
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
|
65 |
+
elif mid_side_b2:
|
66 |
+
wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
|
67 |
+
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
|
68 |
+
else:
|
69 |
+
wave_left = np.asfortranarray(wave[0])
|
70 |
+
wave_right = np.asfortranarray(wave[1])
|
71 |
+
|
72 |
+
def run_thread(**kwargs):
|
73 |
+
global spec_left
|
74 |
+
spec_left = librosa.stft(**kwargs)
|
75 |
+
|
76 |
+
thread = threading.Thread(
|
77 |
+
target=run_thread,
|
78 |
+
kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
|
79 |
+
)
|
80 |
+
thread.start()
|
81 |
+
spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
|
82 |
+
thread.join()
|
83 |
+
|
84 |
+
spec = np.asfortranarray([spec_left, spec_right])
|
85 |
+
|
86 |
+
return spec
|
87 |
+
|
88 |
+
|
89 |
+
def combine_spectrograms(specs, mp):
|
90 |
+
l = min([specs[i].shape[2] for i in specs])
|
91 |
+
spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
|
92 |
+
offset = 0
|
93 |
+
bands_n = len(mp.param["band"])
|
94 |
+
|
95 |
+
for d in range(1, bands_n + 1):
|
96 |
+
h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
|
97 |
+
spec_c[:, offset : offset + h, :l] = specs[d][
|
98 |
+
:, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
|
99 |
+
]
|
100 |
+
offset += h
|
101 |
+
|
102 |
+
if offset > mp.param["bins"]:
|
103 |
+
raise ValueError("Too much bins")
|
104 |
+
|
105 |
+
# lowpass fiter
|
106 |
+
if (
|
107 |
+
mp.param["pre_filter_start"] > 0
|
108 |
+
): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
|
109 |
+
if bands_n == 1:
|
110 |
+
spec_c = fft_lp_filter(
|
111 |
+
spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
|
112 |
+
)
|
113 |
+
else:
|
114 |
+
gp = 1
|
115 |
+
for b in range(
|
116 |
+
mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
|
117 |
+
):
|
118 |
+
g = math.pow(
|
119 |
+
10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
|
120 |
+
)
|
121 |
+
gp = g
|
122 |
+
spec_c[:, b, :] *= g
|
123 |
+
|
124 |
+
return np.asfortranarray(spec_c)
|
125 |
+
|
126 |
+
|
127 |
+
def spectrogram_to_image(spec, mode="magnitude"):
|
128 |
+
if mode == "magnitude":
|
129 |
+
if np.iscomplexobj(spec):
|
130 |
+
y = np.abs(spec)
|
131 |
+
else:
|
132 |
+
y = spec
|
133 |
+
y = np.log10(y**2 + 1e-8)
|
134 |
+
elif mode == "phase":
|
135 |
+
if np.iscomplexobj(spec):
|
136 |
+
y = np.angle(spec)
|
137 |
+
else:
|
138 |
+
y = spec
|
139 |
+
|
140 |
+
y -= y.min()
|
141 |
+
y *= 255 / y.max()
|
142 |
+
img = np.uint8(y)
|
143 |
+
|
144 |
+
if y.ndim == 3:
|
145 |
+
img = img.transpose(1, 2, 0)
|
146 |
+
img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
|
147 |
+
|
148 |
+
return img
|
149 |
+
|
150 |
+
|
151 |
+
def reduce_vocal_aggressively(X, y, softmask):
|
152 |
+
v = X - y
|
153 |
+
y_mag_tmp = np.abs(y)
|
154 |
+
v_mag_tmp = np.abs(v)
|
155 |
+
|
156 |
+
v_mask = v_mag_tmp > y_mag_tmp
|
157 |
+
y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
|
158 |
+
|
159 |
+
return y_mag * np.exp(1.0j * np.angle(y))
|
160 |
+
|
161 |
+
|
162 |
+
def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
|
163 |
+
if min_range < fade_size * 2:
|
164 |
+
raise ValueError("min_range must be >= fade_area * 2")
|
165 |
+
|
166 |
+
mag = mag.copy()
|
167 |
+
|
168 |
+
idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
|
169 |
+
starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
|
170 |
+
ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
|
171 |
+
uninformative = np.where(ends - starts > min_range)[0]
|
172 |
+
if len(uninformative) > 0:
|
173 |
+
starts = starts[uninformative]
|
174 |
+
ends = ends[uninformative]
|
175 |
+
old_e = None
|
176 |
+
for s, e in zip(starts, ends):
|
177 |
+
if old_e is not None and s - old_e < fade_size:
|
178 |
+
s = old_e - fade_size * 2
|
179 |
+
|
180 |
+
if s != 0:
|
181 |
+
weight = np.linspace(0, 1, fade_size)
|
182 |
+
mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
|
183 |
+
else:
|
184 |
+
s -= fade_size
|
185 |
+
|
186 |
+
if e != mag.shape[2]:
|
187 |
+
weight = np.linspace(1, 0, fade_size)
|
188 |
+
mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
|
189 |
+
else:
|
190 |
+
e += fade_size
|
191 |
+
|
192 |
+
mag[:, :, s + fade_size : e - fade_size] += ref[
|
193 |
+
:, :, s + fade_size : e - fade_size
|
194 |
+
]
|
195 |
+
old_e = e
|
196 |
+
|
197 |
+
return mag
|
198 |
+
|
199 |
+
|
200 |
+
def align_wave_head_and_tail(a, b):
|
201 |
+
l = min([a[0].size, b[0].size])
|
202 |
+
|
203 |
+
return a[:l, :l], b[:l, :l]
|
204 |
+
|
205 |
+
|
206 |
+
def cache_or_load(mix_path, inst_path, mp):
|
207 |
+
mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
|
208 |
+
inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
|
209 |
+
|
210 |
+
cache_dir = "mph{}".format(
|
211 |
+
hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
|
212 |
+
)
|
213 |
+
mix_cache_dir = os.path.join("cache", cache_dir)
|
214 |
+
inst_cache_dir = os.path.join("cache", cache_dir)
|
215 |
+
|
216 |
+
os.makedirs(mix_cache_dir, exist_ok=True)
|
217 |
+
os.makedirs(inst_cache_dir, exist_ok=True)
|
218 |
+
|
219 |
+
mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
|
220 |
+
inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")
|
221 |
+
|
222 |
+
if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
|
223 |
+
X_spec_m = np.load(mix_cache_path)
|
224 |
+
y_spec_m = np.load(inst_cache_path)
|
225 |
+
else:
|
226 |
+
X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
|
227 |
+
|
228 |
+
for d in range(len(mp.param["band"]), 0, -1):
|
229 |
+
bp = mp.param["band"][d]
|
230 |
+
|
231 |
+
if d == len(mp.param["band"]): # high-end band
|
232 |
+
X_wave[d], _ = librosa.load(
|
233 |
+
mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
|
234 |
+
)
|
235 |
+
y_wave[d], _ = librosa.load(
|
236 |
+
inst_path,
|
237 |
+
bp["sr"],
|
238 |
+
False,
|
239 |
+
dtype=np.float32,
|
240 |
+
res_type=bp["res_type"],
|
241 |
+
)
|
242 |
+
else: # lower bands
|
243 |
+
X_wave[d] = librosa.resample(
|
244 |
+
X_wave[d + 1],
|
245 |
+
mp.param["band"][d + 1]["sr"],
|
246 |
+
bp["sr"],
|
247 |
+
res_type=bp["res_type"],
|
248 |
+
)
|
249 |
+
y_wave[d] = librosa.resample(
|
250 |
+
y_wave[d + 1],
|
251 |
+
mp.param["band"][d + 1]["sr"],
|
252 |
+
bp["sr"],
|
253 |
+
res_type=bp["res_type"],
|
254 |
+
)
|
255 |
+
|
256 |
+
X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
|
257 |
+
|
258 |
+
X_spec_s[d] = wave_to_spectrogram(
|
259 |
+
X_wave[d],
|
260 |
+
bp["hl"],
|
261 |
+
bp["n_fft"],
|
262 |
+
mp.param["mid_side"],
|
263 |
+
mp.param["mid_side_b2"],
|
264 |
+
mp.param["reverse"],
|
265 |
+
)
|
266 |
+
y_spec_s[d] = wave_to_spectrogram(
|
267 |
+
y_wave[d],
|
268 |
+
bp["hl"],
|
269 |
+
bp["n_fft"],
|
270 |
+
mp.param["mid_side"],
|
271 |
+
mp.param["mid_side_b2"],
|
272 |
+
mp.param["reverse"],
|
273 |
+
)
|
274 |
+
|
275 |
+
del X_wave, y_wave
|
276 |
+
|
277 |
+
X_spec_m = combine_spectrograms(X_spec_s, mp)
|
278 |
+
y_spec_m = combine_spectrograms(y_spec_s, mp)
|
279 |
+
|
280 |
+
if X_spec_m.shape != y_spec_m.shape:
|
281 |
+
raise ValueError("The combined spectrograms are different: " + mix_path)
|
282 |
+
|
283 |
+
_, ext = os.path.splitext(mix_path)
|
284 |
+
|
285 |
+
np.save(mix_cache_path, X_spec_m)
|
286 |
+
np.save(inst_cache_path, y_spec_m)
|
287 |
+
|
288 |
+
return X_spec_m, y_spec_m
|
289 |
+
|
290 |
+
|
291 |
+
def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
|
292 |
+
spec_left = np.asfortranarray(spec[0])
|
293 |
+
spec_right = np.asfortranarray(spec[1])
|
294 |
+
|
295 |
+
wave_left = librosa.istft(spec_left, hop_length=hop_length)
|
296 |
+
wave_right = librosa.istft(spec_right, hop_length=hop_length)
|
297 |
+
|
298 |
+
if reverse:
|
299 |
+
return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
|
300 |
+
elif mid_side:
|
301 |
+
return np.asfortranarray(
|
302 |
+
[np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
|
303 |
+
)
|
304 |
+
elif mid_side_b2:
|
305 |
+
return np.asfortranarray(
|
306 |
+
[
|
307 |
+
np.add(wave_right / 1.25, 0.4 * wave_left),
|
308 |
+
np.subtract(wave_left / 1.25, 0.4 * wave_right),
|
309 |
+
]
|
310 |
+
)
|
311 |
+
else:
|
312 |
+
return np.asfortranarray([wave_left, wave_right])
|
313 |
+
|
314 |
+
|
315 |
+
def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
|
316 |
+
import threading
|
317 |
+
|
318 |
+
spec_left = np.asfortranarray(spec[0])
|
319 |
+
spec_right = np.asfortranarray(spec[1])
|
320 |
+
|
321 |
+
def run_thread(**kwargs):
|
322 |
+
global wave_left
|
323 |
+
wave_left = librosa.istft(**kwargs)
|
324 |
+
|
325 |
+
thread = threading.Thread(
|
326 |
+
target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
|
327 |
+
)
|
328 |
+
thread.start()
|
329 |
+
wave_right = librosa.istft(spec_right, hop_length=hop_length)
|
330 |
+
thread.join()
|
331 |
+
|
332 |
+
if reverse:
|
333 |
+
return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
|
334 |
+
elif mid_side:
|
335 |
+
return np.asfortranarray(
|
336 |
+
[np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
|
337 |
+
)
|
338 |
+
elif mid_side_b2:
|
339 |
+
return np.asfortranarray(
|
340 |
+
[
|
341 |
+
np.add(wave_right / 1.25, 0.4 * wave_left),
|
342 |
+
np.subtract(wave_left / 1.25, 0.4 * wave_right),
|
343 |
+
]
|
344 |
+
)
|
345 |
+
else:
|
346 |
+
return np.asfortranarray([wave_left, wave_right])
|
347 |
+
|
348 |
+
|
349 |
+
def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
|
350 |
+
wave_band = {}
|
351 |
+
bands_n = len(mp.param["band"])
|
352 |
+
offset = 0
|
353 |
+
|
354 |
+
for d in range(1, bands_n + 1):
|
355 |
+
bp = mp.param["band"][d]
|
356 |
+
spec_s = np.ndarray(
|
357 |
+
shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
|
358 |
+
)
|
359 |
+
h = bp["crop_stop"] - bp["crop_start"]
|
360 |
+
spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
|
361 |
+
:, offset : offset + h, :
|
362 |
+
]
|
363 |
+
|
364 |
+
offset += h
|
365 |
+
if d == bands_n: # higher
|
366 |
+
if extra_bins_h: # if --high_end_process bypass
|
367 |
+
max_bin = bp["n_fft"] // 2
|
368 |
+
spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
|
369 |
+
:, :extra_bins_h, :
|
370 |
+
]
|
371 |
+
if bp["hpf_start"] > 0:
|
372 |
+
spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
|
373 |
+
if bands_n == 1:
|
374 |
+
wave = spectrogram_to_wave(
|
375 |
+
spec_s,
|
376 |
+
bp["hl"],
|
377 |
+
mp.param["mid_side"],
|
378 |
+
mp.param["mid_side_b2"],
|
379 |
+
mp.param["reverse"],
|
380 |
+
)
|
381 |
+
else:
|
382 |
+
wave = np.add(
|
383 |
+
wave,
|
384 |
+
spectrogram_to_wave(
|
385 |
+
spec_s,
|
386 |
+
bp["hl"],
|
387 |
+
mp.param["mid_side"],
|
388 |
+
mp.param["mid_side_b2"],
|
389 |
+
mp.param["reverse"],
|
390 |
+
),
|
391 |
+
)
|
392 |
+
else:
|
393 |
+
sr = mp.param["band"][d + 1]["sr"]
|
394 |
+
if d == 1: # lower
|
395 |
+
spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
|
396 |
+
wave = librosa.resample(
|
397 |
+
spectrogram_to_wave(
|
398 |
+
spec_s,
|
399 |
+
bp["hl"],
|
400 |
+
mp.param["mid_side"],
|
401 |
+
mp.param["mid_side_b2"],
|
402 |
+
mp.param["reverse"],
|
403 |
+
),
|
404 |
+
bp["sr"],
|
405 |
+
sr,
|
406 |
+
res_type="sinc_fastest",
|
407 |
+
)
|
408 |
+
else: # mid
|
409 |
+
spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
|
410 |
+
spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
|
411 |
+
wave2 = np.add(
|
412 |
+
wave,
|
413 |
+
spectrogram_to_wave(
|
414 |
+
spec_s,
|
415 |
+
bp["hl"],
|
416 |
+
mp.param["mid_side"],
|
417 |
+
mp.param["mid_side_b2"],
|
418 |
+
mp.param["reverse"],
|
419 |
+
),
|
420 |
+
)
|
421 |
+
# wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
|
422 |
+
wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
|
423 |
+
|
424 |
+
return wave.T
|
425 |
+
|
426 |
+
|
427 |
+
def fft_lp_filter(spec, bin_start, bin_stop):
|
428 |
+
g = 1.0
|
429 |
+
for b in range(bin_start, bin_stop):
|
430 |
+
g -= 1 / (bin_stop - bin_start)
|
431 |
+
spec[:, b, :] = g * spec[:, b, :]
|
432 |
+
|
433 |
+
spec[:, bin_stop:, :] *= 0
|
434 |
+
|
435 |
+
return spec
|
436 |
+
|
437 |
+
|
438 |
+
def fft_hp_filter(spec, bin_start, bin_stop):
|
439 |
+
g = 1.0
|
440 |
+
for b in range(bin_start, bin_stop, -1):
|
441 |
+
g -= 1 / (bin_start - bin_stop)
|
442 |
+
spec[:, b, :] = g * spec[:, b, :]
|
443 |
+
|
444 |
+
spec[:, 0 : bin_stop + 1, :] *= 0
|
445 |
+
|
446 |
+
return spec
|
447 |
+
|
448 |
+
|
449 |
+
def mirroring(a, spec_m, input_high_end, mp):
|
450 |
+
if "mirroring" == a:
|
451 |
+
mirror = np.flip(
|
452 |
+
np.abs(
|
453 |
+
spec_m[
|
454 |
+
:,
|
455 |
+
mp.param["pre_filter_start"]
|
456 |
+
- 10
|
457 |
+
- input_high_end.shape[1] : mp.param["pre_filter_start"]
|
458 |
+
- 10,
|
459 |
+
:,
|
460 |
+
]
|
461 |
+
),
|
462 |
+
1,
|
463 |
+
)
|
464 |
+
mirror = mirror * np.exp(1.0j * np.angle(input_high_end))
|
465 |
+
|
466 |
+
return np.where(
|
467 |
+
np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
|
468 |
+
)
|
469 |
+
|
470 |
+
if "mirroring2" == a:
|
471 |
+
mirror = np.flip(
|
472 |
+
np.abs(
|
473 |
+
spec_m[
|
474 |
+
:,
|
475 |
+
mp.param["pre_filter_start"]
|
476 |
+
- 10
|
477 |
+
- input_high_end.shape[1] : mp.param["pre_filter_start"]
|
478 |
+
- 10,
|
479 |
+
:,
|
480 |
+
]
|
481 |
+
),
|
482 |
+
1,
|
483 |
+
)
|
484 |
+
mi = np.multiply(mirror, input_high_end * 1.7)
|
485 |
+
|
486 |
+
return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)
|
487 |
+
|
488 |
+
|
489 |
+
def ensembling(a, specs):
|
490 |
+
for i in range(1, len(specs)):
|
491 |
+
if i == 1:
|
492 |
+
spec = specs[0]
|
493 |
+
|
494 |
+
ln = min([spec.shape[2], specs[i].shape[2]])
|
495 |
+
spec = spec[:, :, :ln]
|
496 |
+
specs[i] = specs[i][:, :, :ln]
|
497 |
+
|
498 |
+
if "min_mag" == a:
|
499 |
+
spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
|
500 |
+
if "max_mag" == a:
|
501 |
+
spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)
|
502 |
+
|
503 |
+
return spec
|
504 |
+
|
505 |
+
|
def stft(wave, nfft, hl):
    wave_left = np.asfortranarray(wave[0])
    wave_right = np.asfortranarray(wave[1])
    spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
    spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
    spec = np.asfortranarray([spec_left, spec_right])

    return spec


def istft(spec, hl):
    spec_left = np.asfortranarray(spec[0])
    spec_right = np.asfortranarray(spec[1])

    wave_left = librosa.istft(spec_left, hop_length=hl)
    wave_right = librosa.istft(spec_right, hop_length=hl)
    wave = np.asfortranarray([wave_left, wave_right])

    return wave  # the original omitted this return and silently yielded None


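# Editor's example (not part of the original file): with the return added
# above, stft/istft round-trip a stereo buffer (sizes arbitrary; assumes the
# older positional librosa.stft API that this file targets):
#
#     stereo = np.random.randn(2, 4096).astype(np.float32)
#     spec = stft(stereo, 1024, 256)       # -> shape (2, 513, 17)
#     restored = istft(spec, 256)          # -> shape (2, 4096)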
if __name__ == "__main__":
    import argparse
    import sys
    import time

    import cv2
    from model_param_init import ModelParameters

    p = argparse.ArgumentParser()
    p.add_argument(
        "--algorithm",
        "-a",
        type=str,
        choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
        default="min_mag",
    )
    p.add_argument(
        "--model_params",
        "-m",
        type=str,
        default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
    )
    p.add_argument("--output_name", "-o", type=str, default="output")
    p.add_argument("--vocals_only", "-v", action="store_true")
    p.add_argument("input", nargs="+")
    args = p.parse_args()

    start_time = time.time()

    if args.algorithm.startswith("invert") and len(args.input) != 2:
        raise ValueError("There should be two input files.")

    if not args.algorithm.startswith("invert") and len(args.input) < 2:
        raise ValueError("There must be at least two input files.")

    wave, specs = {}, {}
    mp = ModelParameters(args.model_params)

    for i in range(len(args.input)):
        spec = {}

        for d in range(len(mp.param["band"]), 0, -1):
            bp = mp.param["band"][d]

            if d == len(mp.param["band"]):  # high-end band
                wave[d], _ = librosa.load(
                    args.input[i],
                    bp["sr"],
                    False,
                    dtype=np.float32,
                    res_type=bp["res_type"],
                )

                if len(wave[d].shape) == 1:  # mono to stereo
                    wave[d] = np.array([wave[d], wave[d]])
            else:  # lower bands
                wave[d] = librosa.resample(
                    wave[d + 1],
                    mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )

            spec[d] = wave_to_spectrogram(
                wave[d],
                bp["hl"],
                bp["n_fft"],
                mp.param["mid_side"],
                mp.param["mid_side_b2"],
                mp.param["reverse"],
            )

        specs[i] = combine_spectrograms(spec, mp)

    del wave

    if args.algorithm == "deep":
        # the original read `spec[1]` here, indexing the last input's per-band
        # dict instead of the second combined spectrogram; fixed to `specs[1]`
        d_spec = np.where(np.abs(specs[0]) <= np.abs(specs[1]), specs[0], specs[1])
        v_spec = d_spec - specs[1]
        sf.write(
            os.path.join("{}.wav".format(args.output_name)),
            cmb_spectrogram_to_wave(v_spec, mp),
            mp.param["sr"],
        )

    if args.algorithm.startswith("invert"):
        ln = min([specs[0].shape[2], specs[1].shape[2]])
        specs[0] = specs[0][:, :, :ln]
        specs[1] = specs[1][:, :, :ln]

        if "invert_p" == args.algorithm:
            X_mag = np.abs(specs[0])
            y_mag = np.abs(specs[1])
            max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
            v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
        else:
            specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
            v_spec = specs[0] - specs[1]

        if not args.vocals_only:
            X_mag = np.abs(specs[0])
            y_mag = np.abs(specs[1])
            v_mag = np.abs(v_spec)

            X_image = spectrogram_to_image(X_mag)
            y_image = spectrogram_to_image(y_mag)
            v_image = spectrogram_to_image(v_mag)

            cv2.imwrite("{}_X.png".format(args.output_name), X_image)
            cv2.imwrite("{}_y.png".format(args.output_name), y_image)
            cv2.imwrite("{}_v.png".format(args.output_name), v_image)

            sf.write(
                "{}_X.wav".format(args.output_name),
                cmb_spectrogram_to_wave(specs[0], mp),
                mp.param["sr"],
            )
            sf.write(
                "{}_y.wav".format(args.output_name),
                cmb_spectrogram_to_wave(specs[1], mp),
                mp.param["sr"],
            )

        sf.write(
            "{}_v.wav".format(args.output_name),
            cmb_spectrogram_to_wave(v_spec, mp),
            mp.param["sr"],
        )
    else:
        if not args.algorithm == "deep":
            sf.write(
                os.path.join("ensembled", "{}.wav".format(args.output_name)),
                cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
                mp.param["sr"],
            )

    if args.algorithm == "align":
        trackalignment = [
            {
                "file1": '"{}"'.format(args.input[0]),
                "file2": '"{}"'.format(args.input[1]),
            }
        ]

        for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
            os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")

    # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
uvr5/lib/name_params.json
ADDED
@@ -0,0 +1,263 @@
{
    "equivalent": [
        {
            "model_hash_name": [
                {
                    "hash_name": "47939caf0cfe52a0e81442b85b971dfd",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "4e4ecb9764c50a8c414fee6e10395bbe",
                    "model_params": "lib/lib_v5/modelparams/4band_v2.json",
                    "param_name": "4band_v2"
                },
                {
                    "hash_name": "ca106edd563e034bde0bdec4bb7a4b36",
                    "model_params": "lib/lib_v5/modelparams/4band_v2.json",
                    "param_name": "4band_v2"
                },
                {
                    "hash_name": "e60a1e84803ce4efc0a6551206cc4b71",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "a82f14e75892e55e994376edbf0c8435",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "6dd9eaa6f0420af9f1d403aaafa4cc06",
                    "model_params": "lib/lib_v5/modelparams/4band_v2_sn.json",
                    "param_name": "4band_v2_sn"
                },
                {
                    "hash_name": "08611fb99bd59eaa79ad27c58d137727",
                    "model_params": "lib/lib_v5/modelparams/4band_v2_sn.json",
                    "param_name": "4band_v2_sn"
                },
                {
                    "hash_name": "5c7bbca45a187e81abbbd351606164e5",
                    "model_params": "lib/lib_v5/modelparams/3band_44100_msb2.json",
                    "param_name": "3band_44100_msb2"
                },
                {
                    "hash_name": "d6b2cb685a058a091e5e7098192d3233",
                    "model_params": "lib/lib_v5/modelparams/3band_44100_msb2.json",
                    "param_name": "3band_44100_msb2"
                },
                {
                    "hash_name": "c1b9f38170a7c90e96f027992eb7c62b",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "c3448ec923fa0edf3d03a19e633faa53",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "68aa2c8093d0080704b200d140f59e54",
                    "model_params": "lib/lib_v5/modelparams/3band_44100.json",
                    "param_name": "3band_44100"
                },
                {
                    "hash_name": "fdc83be5b798e4bd29fe00fe6600e147",
                    "model_params": "lib/lib_v5/modelparams/3band_44100_mid.json",
                    "param_name": "3band_44100_mid"
                },
                {
                    "hash_name": "2ce34bc92fd57f55db16b7a4def3d745",
                    "model_params": "lib/lib_v5/modelparams/3band_44100_mid.json",
                    "param_name": "3band_44100_mid"
                },
                {
                    "hash_name": "52fdca89576f06cf4340b74a4730ee5f",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "41191165b05d38fc77f072fa9e8e8a30",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "89e83b511ad474592689e562d5b1f80e",
                    "model_params": "lib/lib_v5/modelparams/2band_32000.json",
                    "param_name": "2band_32000"
                },
                {
                    "hash_name": "0b954da81d453b716b114d6d7c95177f",
                    "model_params": "lib/lib_v5/modelparams/2band_32000.json",
                    "param_name": "2band_32000"
                }
            ],
            "v4 Models": [
                {
                    "hash_name": "6a00461c51c2920fd68937d4609ed6c8",
                    "model_params": "lib/lib_v5/modelparams/1band_sr16000_hl512.json",
                    "param_name": "1band_sr16000_hl512"
                },
                {
                    "hash_name": "0ab504864d20f1bd378fe9c81ef37140",
                    "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json",
                    "param_name": "1band_sr32000_hl512"
                },
                {
                    "hash_name": "7dd21065bf91c10f7fccb57d7d83b07f",
                    "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json",
                    "param_name": "1band_sr32000_hl512"
                },
                {
                    "hash_name": "80ab74d65e515caa3622728d2de07d23",
                    "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json",
                    "param_name": "1band_sr32000_hl512"
                },
                {
                    "hash_name": "edc115e7fc523245062200c00caa847f",
                    "model_params": "lib/lib_v5/modelparams/1band_sr33075_hl384.json",
                    "param_name": "1band_sr33075_hl384"
                },
                {
                    "hash_name": "28063e9f6ab5b341c5f6d3c67f2045b7",
                    "model_params": "lib/lib_v5/modelparams/1band_sr33075_hl384.json",
                    "param_name": "1band_sr33075_hl384"
                },
                {
                    "hash_name": "b58090534c52cbc3e9b5104bad666ef2",
                    "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl512.json",
                    "param_name": "1band_sr44100_hl512"
                },
                {
                    "hash_name": "0cdab9947f1b0928705f518f3c78ea8f",
                    "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl512.json",
                    "param_name": "1band_sr44100_hl512"
                },
                {
                    "hash_name": "ae702fed0238afb5346db8356fe25f13",
                    "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl1024.json",
                    "param_name": "1band_sr44100_hl1024"
                }
            ]
        }
    ],
    "User Models": [
        {
            "1 Band": [
                {
                    "hash_name": "1band_sr16000_hl512",
                    "model_params": "lib/lib_v5/modelparams/1band_sr16000_hl512.json",
                    "param_name": "1band_sr16000_hl512"
                },
                {
                    "hash_name": "1band_sr32000_hl512",
                    "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json",
                    "param_name": "1band_sr32000_hl512"
                },
                {
                    "hash_name": "1band_sr33075_hl384",
                    "model_params": "lib/lib_v5/modelparams/1band_sr33075_hl384.json",
                    "param_name": "1band_sr33075_hl384"
                },
                {
                    "hash_name": "1band_sr44100_hl256",
                    "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl256.json",
                    "param_name": "1band_sr44100_hl256"
                },
                {
                    "hash_name": "1band_sr44100_hl512",
                    "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl512.json",
                    "param_name": "1band_sr44100_hl512"
                },
                {
                    "hash_name": "1band_sr44100_hl1024",
                    "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl1024.json",
                    "param_name": "1band_sr44100_hl1024"
                }
            ],
            "2 Band": [
                {
                    "hash_name": "2band_44100_lofi",
                    "model_params": "lib/lib_v5/modelparams/2band_44100_lofi.json",
                    "param_name": "2band_44100_lofi"
                },
                {
                    "hash_name": "2band_32000",
                    "model_params": "lib/lib_v5/modelparams/2band_32000.json",
                    "param_name": "2band_32000"
                },
                {
                    "hash_name": "2band_48000",
                    "model_params": "lib/lib_v5/modelparams/2band_48000.json",
                    "param_name": "2band_48000"
                }
            ],
            "3 Band": [
                {
                    "hash_name": "3band_44100",
                    "model_params": "lib/lib_v5/modelparams/3band_44100.json",
                    "param_name": "3band_44100"
                },
                {
                    "hash_name": "3band_44100_mid",
                    "model_params": "lib/lib_v5/modelparams/3band_44100_mid.json",
                    "param_name": "3band_44100_mid"
                },
                {
                    "hash_name": "3band_44100_msb2",
                    "model_params": "lib/lib_v5/modelparams/3band_44100_msb2.json",
                    "param_name": "3band_44100_msb2"
                }
            ],
            "4 Band": [
                {
                    "hash_name": "4band_44100",
                    "model_params": "lib/lib_v5/modelparams/4band_44100.json",
                    "param_name": "4band_44100"
                },
                {
                    "hash_name": "4band_44100_mid",
                    "model_params": "lib/lib_v5/modelparams/4band_44100_mid.json",
                    "param_name": "4band_44100_mid"
                },
                {
                    "hash_name": "4band_44100_msb",
                    "model_params": "lib/lib_v5/modelparams/4band_44100_msb.json",
                    "param_name": "4band_44100_msb"
                },
                {
                    "hash_name": "4band_44100_msb2",
                    "model_params": "lib/lib_v5/modelparams/4band_44100_msb2.json",
                    "param_name": "4band_44100_msb2"
                },
                {
                    "hash_name": "4band_44100_reverse",
                    "model_params": "lib/lib_v5/modelparams/4band_44100_reverse.json",
                    "param_name": "4band_44100_reverse"
                },
                {
                    "hash_name": "4band_44100_sw",
                    "model_params": "lib/lib_v5/modelparams/4band_44100_sw.json",
                    "param_name": "4band_44100_sw"
                },
                {
                    "hash_name": "4band_v2",
                    "model_params": "lib/lib_v5/modelparams/4band_v2.json",
                    "param_name": "4band_v2"
                },
                {
                    "hash_name": "4band_v2_sn",
                    "model_params": "lib/lib_v5/modelparams/4band_v2_sn.json",
                    "param_name": "4band_v2_sn"
                },
                {
                    "hash_name": "tmodelparam",
                    "model_params": "lib/lib_v5/modelparams/tmodelparam.json",
                    "param_name": "User Model Param Set"
                }
            ]
        }
    ]
}
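This lookup table maps known UVR5 checkpoint hashes (and user parameter-set names) to their band-parameter JSON files. A hedged sketch of how it is consumed via uvr5/lib/utils.py, assuming the working directory lets load_data find ./lib/name_params.json and using a placeholder .pth path:

from uvr5.lib.utils import _get_name_params

# "47939caf..." is the first hash listed above; the model path is illustrative.
param_name, model_params = _get_name_params(
    "uvr_model/some_model.pth", "47939caf0cfe52a0e81442b85b971dfd"
)
print(param_name)    # 4band_44100
print(model_params)  # lib/lib_v5/modelparams/4band_44100.json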
uvr5/lib/utils.py
ADDED
@@ -0,0 +1,121 @@
import json

import numpy as np
import torch
from tqdm import tqdm


def load_data(file_name: str = "./lib/name_params.json") -> dict:
    with open(file_name, "r") as f:
        data = json.load(f)

    return data


def make_padding(width, cropsize, offset):
    left = offset
    roi_size = cropsize - left * 2
    if roi_size == 0:
        roi_size = cropsize
    right = roi_size - (width % roi_size) + left

    return left, right, roi_size


def inference(X_spec, device, model, aggressiveness, data):
    """
    data : dic configs
    """

    def _execute(
        X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True
    ):
        # Slide a fixed-size window along the frame axis and run the model on
        # each window, concatenating the predictions back together.
        model.eval()
        with torch.no_grad():
            preds = []

            iterations = [n_window]

            total_iterations = sum(iterations)
            for i in tqdm(range(n_window)):
                start = i * roi_size
                X_mag_window = X_mag_pad[
                    None, :, :, start : start + data["window_size"]
                ]
                X_mag_window = torch.from_numpy(X_mag_window)
                if is_half:
                    X_mag_window = X_mag_window.half()
                X_mag_window = X_mag_window.to(device)

                pred = model.predict(X_mag_window, aggressiveness)

                pred = pred.detach().cpu().numpy()
                preds.append(pred[0])

            pred = np.concatenate(preds, axis=2)
        return pred

    def preprocess(X_spec):
        X_mag = np.abs(X_spec)
        X_phase = np.angle(X_spec)

        return X_mag, X_phase

    X_mag, X_phase = preprocess(X_spec)

    coef = X_mag.max()
    X_mag_pre = X_mag / coef

    n_frame = X_mag_pre.shape[2]
    pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset)
    n_window = int(np.ceil(n_frame / roi_size))

    X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")

    # Infer half precision from the checkpoint's own dtype.
    if list(model.state_dict().values())[0].dtype == torch.float16:
        is_half = True
    else:
        is_half = False
    pred = _execute(
        X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
    )
    pred = pred[:, :, :n_frame]

    if data["tta"]:
        # Test-time augmentation: run again with windows shifted by half a
        # window and average the two predictions.
        pad_l += roi_size // 2
        pad_r += roi_size // 2
        n_window += 1

        X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")

        pred_tta = _execute(
            X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
        )
        pred_tta = pred_tta[:, :, roi_size // 2 :]
        pred_tta = pred_tta[:, :, :n_frame]

        return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase)
    else:
        return pred * coef, X_mag, np.exp(1.0j * X_phase)


def _get_name_params(model_path, model_hash):
    data = load_data()
    flag = False
    ModelName = model_path
    for type in list(data):
        for model in list(data[type][0]):
            for i in range(len(data[type][0][model])):
                if str(data[type][0][model][i]["hash_name"]) == model_hash:
                    flag = True
                elif str(data[type][0][model][i]["hash_name"]) in ModelName:
                    flag = True

                if flag:
                    model_params_auto = data[type][0][model][i]["model_params"]
                    param_name_auto = data[type][0][model][i]["param_name"]
                    if type == "equivalent":
                        return param_name_auto, model_params_auto
                    else:
                        flag = False
    # Note: this final return assumes at least one match was recorded above;
    # with no match at all, these names are unbound (quirk kept from upstream).
    return param_name_auto, model_params_auto
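A worked example of make_padding, the helper that pads the frame axis so it divides evenly into inference windows (numbers chosen for illustration):

# 1000 frames, 512-frame window, model offset 128: each window keeps a
# 512 - 2*128 = 256-frame region of interest.
left, right, roi_size = make_padding(1000, 512, 128)
print(left, right, roi_size)   # 128 152 256
# padded width = 128 + 1000 + 152 = 1280 = 5 * 256, an exact number of windows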
uvr5/uvr_model/__init__.py
ADDED
@@ -0,0 +1 @@
(empty file)
uvr5/vr.py
ADDED
@@ -0,0 +1,196 @@
import os, sys

parent_directory = os.path.dirname(os.path.abspath(__file__))
import logging, pdb

logger = logging.getLogger(__name__)

import librosa
import numpy as np
import soundfile as sf
import torch
from uvr5.lib.lib_v5 import nets_61968KB as Nets
from uvr5.lib.lib_v5 import spec_utils
from uvr5.lib.lib_v5.model_param_init import ModelParameters
from uvr5.lib.utils import inference


class AudioPre:
    def __init__(self, agg, model_path, device, is_half, tta=False):
        self.model_path = model_path
        self.device = device
        self.data = {
            # Processing Options
            "postprocess": False,
            "tta": tta,
            # Constants
            "window_size": 512,
            "agg": agg,
            "high_end_process": "mirroring",
        }
        mp = ModelParameters("%s/lib/lib_v5/modelparams/4band_v2.json" % parent_directory)
        model = Nets.CascadedASPPNet(mp.param["bins"] * 2)
        cpk = torch.load(model_path, map_location="cpu")
        model.load_state_dict(cpk)
        model.eval()
        if is_half:
            model = model.half().to(device)
        else:
            model = model.to(device)

        self.mp = mp
        self.model = model

    def _path_audio_(
        self, music_file, ins_root=None, vocal_root=None, format="flac", is_hp3=False
    ):
        if ins_root is None and vocal_root is None:
            return "No save root."
        name = os.path.basename(music_file)
        if ins_root is not None:
            os.makedirs(ins_root, exist_ok=True)
        if vocal_root is not None:
            os.makedirs(vocal_root, exist_ok=True)
        X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
        bands_n = len(self.mp.param["band"])
        # print(bands_n)
        for d in range(bands_n, 0, -1):
            bp = self.mp.param["band"][d]
            if d == bands_n:  # high-end band
                (
                    X_wave[d],
                    _,
                ) = librosa.core.load(
                    # in theory librosa may misread some audio files; loading
                    # through ffmpeg would be safer, but was judged too cumbersome
                    music_file,
                    bp["sr"],
                    False,
                    dtype=np.float32,
                    res_type=bp["res_type"],
                )
                if X_wave[d].ndim == 1:
                    X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
            else:  # lower bands
                X_wave[d] = librosa.core.resample(
                    X_wave[d + 1],
                    self.mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )
            # STFT of wave source
            X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
                X_wave[d],
                bp["hl"],
                bp["n_fft"],
                self.mp.param["mid_side"],
                self.mp.param["mid_side_b2"],
                self.mp.param["reverse"],
            )
            # pdb.set_trace()
            if d == bands_n and self.data["high_end_process"] != "none":
                input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
                    self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
                )
                input_high_end = X_spec_s[d][
                    :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
                ]

        X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
        aggresive_set = float(self.data["agg"] / 100)
        aggressiveness = {
            "value": aggresive_set,
            "split_bin": self.mp.param["band"][1]["crop_stop"],
        }
        with torch.no_grad():
            pred, X_mag, X_phase = inference(
                X_spec_m, self.device, self.model, aggressiveness, self.data
            )
        # Postprocess
        if self.data["postprocess"]:
            pred_inv = np.clip(X_mag - pred, 0, np.inf)
            pred = spec_utils.mask_silence(pred, pred_inv)
        y_spec_m = pred * X_phase
        v_spec_m = X_spec_m - y_spec_m

        if is_hp3:
            ins_root, vocal_root = vocal_root, ins_root

        if ins_root is not None:
            if self.data["high_end_process"].startswith("mirroring"):
                input_high_end_ = spec_utils.mirroring(
                    self.data["high_end_process"], y_spec_m, input_high_end, self.mp
                )
                wav_instrument = spec_utils.cmb_spectrogram_to_wave(
                    y_spec_m, self.mp, input_high_end_h, input_high_end_
                )
            else:
                wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
            logger.info("%s instruments done" % name)
            if is_hp3:
                head = "vocal_"
            else:
                head = "instrument_"
            if format in ["wav", "flac"]:
                sf.write(
                    os.path.join(
                        ins_root,
                        head + "{}_{}.{}".format(name, self.data["agg"], format),
                    ),
                    (np.array(wav_instrument)).astype("float32"),
                    self.mp.param["sr"],
                )
            else:
                path = os.path.join(
                    ins_root, head + "{}_{}.wav".format(name, self.data["agg"])
                )
                sf.write(
                    path,
                    (np.array(wav_instrument)).astype("float32"),
                    self.mp.param["sr"],
                )
                if os.path.exists(path):
                    opt_format_path = path[:-4] + ".%s" % format
                    os.system("ffmpeg -i %s -vn %s -q:a 2 -y" % (path, opt_format_path))
                    if os.path.exists(opt_format_path):
                        try:
                            os.remove(path)
                        except:
                            pass
        if vocal_root is not None:
            if is_hp3:
                head = "instrument_"
            else:
                head = "vocal_"
            if self.data["high_end_process"].startswith("mirroring"):
                input_high_end_ = spec_utils.mirroring(
                    self.data["high_end_process"], v_spec_m, input_high_end, self.mp
                )
                wav_vocals = spec_utils.cmb_spectrogram_to_wave(
                    v_spec_m, self.mp, input_high_end_h, input_high_end_
                )
            else:
                wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
            logger.info("%s vocals done" % name)
            if format in ["wav", "flac"]:
                sf.write(
                    os.path.join(
                        vocal_root,
                        head + "{}_{}.{}".format(name, self.data["agg"], format),
                    ),
                    (np.array(wav_vocals)).astype("float32"),
                    self.mp.param["sr"],
                )
            else:
                path = os.path.join(
                    vocal_root, head + "{}_{}.wav".format(name, self.data["agg"])
                )
                sf.write(
                    path,
                    (np.array(wav_vocals)).astype("float32"),
                    self.mp.param["sr"],
                )
                if os.path.exists(path):
                    opt_format_path = path[:-4] + ".%s" % format
                    os.system("ffmpeg -i %s -vn %s -q:a 2 -y" % (path, opt_format_path))
                    if os.path.exists(opt_format_path):
                        try:
                            os.remove(path)
                        except:
                            pass
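A hedged usage sketch for AudioPre; the checkpoint filename and output folders are placeholders (the class itself only requires UVR5 v5 weights compatible with the 4band_v2 parameters loaded in __init__):

import torch
from uvr5.vr import AudioPre

device = "cuda" if torch.cuda.is_available() else "cpu"
pre = AudioPre(
    agg=10,                                      # separation aggressiveness, 0-100
    model_path="uvr5/uvr_model/your_model.pth",  # placeholder path
    device=device,
    is_half=False,
)
pre._path_audio_("song.wav", ins_root="opt_ins", vocal_root="opt_vocal", format="wav")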
vc_infer_pipeline.py
ADDED
@@ -0,0 +1,424 @@
import numpy as np, parselmouth, torch, pdb, sys, os
from time import time as ttime
import torch.nn.functional as F
import scipy.signal as signal
import os, traceback, faiss, librosa, torchcrepe  # pyworld
from scipy import signal
from functools import lru_cache

now_dir = os.getcwd()
sys.path.append(now_dir)

bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)

input_audio_path2wav = {}


def change_rms(data1, sr1, data2, sr2, rate):
    # data1 is the input audio, data2 the converted output; rate is data2's weight
    # print(data1.max(),data2.max())
    rms1 = librosa.feature.rms(
        y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
    )  # one point every half second
    rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
    rms1 = torch.from_numpy(rms1)
    rms1 = F.interpolate(
        rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
    ).squeeze()
    rms2 = torch.from_numpy(rms2)
    rms2 = F.interpolate(
        rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
    ).squeeze()
    rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
    data2 *= (
        torch.pow(rms1, torch.tensor(1 - rate))
        * torch.pow(rms2, torch.tensor(rate - 1))
    ).numpy()
    return data2


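# Editor's example (not part of the original file): with rate=0.5, change_rms
# pulls the output's loudness envelope halfway toward the input's. The arrays
# here are random placeholders:
#
#     src = np.random.randn(16000).astype(np.float32)        # 1 s at 16 kHz
#     out = np.random.randn(40000).astype(np.float32) * 0.1  # 1 s at 40 kHz
#     mixed = change_rms(src, 16000, out, 40000, rate=0.5)   # shape (40000,)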
class VC(object):
    def __init__(self, tgt_sr, config):
        self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
            config.x_pad,
            config.x_query,
            config.x_center,
            config.x_max,
            config.is_half,
        )
        self.sr = 16000  # hubert input sample rate
        self.window = 160  # samples per frame
        self.t_pad = self.sr * self.x_pad  # padding before/after each chunk
        self.t_pad_tgt = tgt_sr * self.x_pad
        self.t_pad2 = self.t_pad * 2
        self.t_query = self.sr * self.x_query  # search range around each cut point
        self.t_center = self.sr * self.x_center  # spacing between cut points
        self.t_max = self.sr * self.x_max  # duration threshold below which no cutting is done
        self.device = config.device

    def get_f0(
        self,
        input_audio_path,
        x,
        p_len,
        f0_up_key,
        f0_method,
        filter_radius,
        inp_f0=None,
    ):
        global input_audio_path2wav
        time_step = self.window / self.sr * 1000
        f0_min = 50
        f0_max = 1100
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        if f0_method == "pm":
            f0 = (
                parselmouth.Sound(x, self.sr)
                .to_pitch_ac(
                    time_step=time_step / 1000,
                    voicing_threshold=0.6,
                    pitch_floor=f0_min,
                    pitch_ceiling=f0_max,
                )
                .selected_array["frequency"]
            )
            pad_size = (p_len - len(f0) + 1) // 2
            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
                f0 = np.pad(
                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
                )
        elif f0_method == "crepe":
            model = "full"
            # Pick a batch size that doesn't cause memory errors on your gpu
            batch_size = 512
            # Compute pitch using first gpu
            audio = torch.tensor(np.copy(x))[None].float()
            f0, pd = torchcrepe.predict(
                audio,
                self.sr,
                self.window,
                f0_min,
                f0_max,
                model,
                batch_size=batch_size,
                device=self.device,
                return_periodicity=True,
            )
            pd = torchcrepe.filter.median(pd, 3)
            f0 = torchcrepe.filter.mean(f0, 3)
            f0[pd < 0.1] = 0
            f0 = f0[0].cpu().numpy()
        elif f0_method == "rmvpe":
            if hasattr(self, "model_rmvpe") == False:
                from rmvpe import RMVPE

                print("loading rmvpe model")
                self.model_rmvpe = RMVPE(
                    "rmvpe.pt", is_half=self.is_half, device=self.device
                )
            f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
        f0 *= pow(2, f0_up_key / 12)
        # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
        tf0 = self.sr // self.window  # f0 points per second
        if inp_f0 is not None:
            delta_t = np.round(
                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
            ).astype("int16")
            replace_f0 = np.interp(
                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
            )
            shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
                :shape
            ]
        # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
        f0bak = f0.copy()
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed from numpy
        return f0_coarse, f0bak  # 1-0

    def vc(
        self,
        model,
        net_g,
        sid,
        audio0,
        pitch,
        pitchf,
        times,
        index,
        big_npy,
        index_rate,
        version,
        protect,
    ):  # ,file_index,file_big_npy
        feats = torch.from_numpy(audio0)
        if self.is_half:
            feats = feats.half()
        else:
            feats = feats.float()
        if feats.dim() == 2:  # double channels
            feats = feats.mean(-1)
        assert feats.dim() == 1, feats.dim()
        feats = feats.view(1, -1)
        padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)

        inputs = {
            "source": feats.to(self.device),
            "padding_mask": padding_mask,
            "output_layer": 9 if version == "v1" else 12,
        }
        t0 = ttime()
        with torch.no_grad():
            logits = model.extract_features(**inputs)
            feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
        if protect < 0.5 and pitch != None and pitchf != None:
            feats0 = feats.clone()
        if (
            isinstance(index, type(None)) == False
            and isinstance(big_npy, type(None)) == False
            and index_rate != 0
        ):
            npy = feats[0].cpu().numpy()
            if self.is_half:
                npy = npy.astype("float32")

            # _, I = index.search(npy, 1)
            # npy = big_npy[I.squeeze()]

            score, ix = index.search(npy, k=8)
            weight = np.square(1 / score)
            weight /= weight.sum(axis=1, keepdims=True)
            npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)

            if self.is_half:
                npy = npy.astype("float16")
            feats = (
                torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
                + (1 - index_rate) * feats
            )

        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
        if protect < 0.5 and pitch != None and pitchf != None:
            feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
                0, 2, 1
            )
        t1 = ttime()
        p_len = audio0.shape[0] // self.window
        if feats.shape[1] < p_len:
            p_len = feats.shape[1]
            if pitch != None and pitchf != None:
                pitch = pitch[:, :p_len]
                pitchf = pitchf[:, :p_len]

        if protect < 0.5 and pitch != None and pitchf != None:
            pitchff = pitchf.clone()
            pitchff[pitchf > 0] = 1
            pitchff[pitchf < 1] = protect
            pitchff = pitchff.unsqueeze(-1)
            feats = feats * pitchff + feats0 * (1 - pitchff)
            feats = feats.to(feats0.dtype)
        p_len = torch.tensor([p_len], device=self.device).long()
        with torch.no_grad():
            if pitch != None and pitchf != None:
                audio1 = (
                    (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
                    .data.cpu()
                    .float()
                    .numpy()
                )
            else:
                audio1 = (
                    (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
                )
        del feats, p_len, padding_mask
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        t2 = ttime()
        times[0] += t1 - t0
        times[2] += t2 - t1
        return audio1

    def pipeline(
        self,
        model,
        net_g,
        sid,
        audio,
        input_audio_path,
        times,
        f0_up_key,
        f0_method,
        file_index,
        # file_big_npy,
        index_rate,
        if_f0,
        filter_radius,
        tgt_sr,
        resample_sr,
        rms_mix_rate,
        version,
        protect,
        f0_file=None,
    ):
        if (
            file_index != ""
            # and file_big_npy != ""
            # and os.path.exists(file_big_npy) == True
            and os.path.exists(file_index) == True
            and index_rate != 0
        ):
            try:
                index = faiss.read_index(file_index)
                # big_npy = np.load(file_big_npy)
                big_npy = index.reconstruct_n(0, index.ntotal)
            except:
                traceback.print_exc()
                index = big_npy = None
        else:
            index = big_npy = None
        audio = signal.filtfilt(bh, ah, audio)
        audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
        opt_ts = []
        if audio_pad.shape[0] > self.t_max:
            audio_sum = np.zeros_like(audio)
            for i in range(self.window):
                audio_sum += audio_pad[i : i - self.window]
            for t in range(self.t_center, audio.shape[0], self.t_center):
                opt_ts.append(
                    t
                    - self.t_query
                    + np.where(
                        np.abs(audio_sum[t - self.t_query : t + self.t_query])
                        == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
                    )[0][0]
                )
        s = 0
        audio_opt = []
        t = None
        t1 = ttime()
        audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
        p_len = audio_pad.shape[0] // self.window
        inp_f0 = None
        if hasattr(f0_file, "name") == True:
            try:
                with open(f0_file.name, "r") as f:
                    lines = f.read().strip("\n").split("\n")
                inp_f0 = []
                for line in lines:
                    inp_f0.append([float(i) for i in line.split(",")])
                inp_f0 = np.array(inp_f0, dtype="float32")
            except:
                traceback.print_exc()
        sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
        pitch, pitchf = None, None
        if if_f0 == 1:
            pitch, pitchf = self.get_f0(
                input_audio_path,
                audio_pad,
                p_len,
                f0_up_key,
                f0_method,
                filter_radius,
                inp_f0,
            )
            pitch = pitch[:p_len]
            pitchf = pitchf[:p_len]
            if self.device == "mps":
                pitchf = pitchf.astype(np.float32)
            pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
            pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
        t2 = ttime()
        times[1] += t2 - t1
        for t in opt_ts:
            t = t // self.window * self.window
            if if_f0 == 1:
                audio_opt.append(
                    self.vc(
                        model,
                        net_g,
                        sid,
                        audio_pad[s : t + self.t_pad2 + self.window],
                        pitch[:, s // self.window : (t + self.t_pad2) // self.window],
                        pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
                        times,
                        index,
                        big_npy,
                        index_rate,
                        version,
                        protect,
                    )[self.t_pad_tgt : -self.t_pad_tgt]
                )
            else:
                audio_opt.append(
                    self.vc(
                        model,
                        net_g,
                        sid,
                        audio_pad[s : t + self.t_pad2 + self.window],
                        None,
                        None,
                        times,
                        index,
                        big_npy,
                        index_rate,
                        version,
                        protect,
                    )[self.t_pad_tgt : -self.t_pad_tgt]
                )
            s = t
        if if_f0 == 1:
            audio_opt.append(
                self.vc(
                    model,
                    net_g,
                    sid,
                    audio_pad[t:],
                    pitch[:, t // self.window :] if t is not None else pitch,
                    pitchf[:, t // self.window :] if t is not None else pitchf,
                    times,
                    index,
                    big_npy,
                    index_rate,
                    version,
                    protect,
                )[self.t_pad_tgt : -self.t_pad_tgt]
            )
        else:
            audio_opt.append(
                self.vc(
                    model,
                    net_g,
                    sid,
                    audio_pad[t:],
                    None,
                    None,
                    times,
                    index,
                    big_npy,
                    index_rate,
                    version,
                    protect,
                )[self.t_pad_tgt : -self.t_pad_tgt]
            )
        audio_opt = np.concatenate(audio_opt)
        if rms_mix_rate != 1:
            audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
        if resample_sr >= 16000 and tgt_sr != resample_sr:
            audio_opt = librosa.resample(
                audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
            )
        # audio_max = np.abs(audio_opt).max() / 0.99
        # max_int16 = 32768
        # if audio_max > 1:
        #     max_int16 /= audio_max
        # audio_opt = (audio_opt * max_int16).astype(np.int16)
        audio_opt = (np.array(audio_opt)).astype("float32")
        del pitch, pitchf, sid
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return audio_opt
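As a closing worked example, the coarse-f0 quantization inside get_f0 maps Hz to a mel-like scale and bins it into 1..255 (unvoiced frames at 0 Hz land in bin 1). The printed values below are approximate:

import numpy as np

f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)  # ~77.8
f0_mel_max = 1127 * np.log(1 + f0_max / 700)  # ~1064.4

f0 = np.array([0.0, 110.0, 440.0])
f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
    f0_mel_max - f0_mel_min
) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > 255] = 255
print(np.rint(f0_mel).astype(int))  # approximately [  1  23 122]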