Kangarroar committed
Commit
ed1cdd1
1 Parent(s): 300443f

Upload 154 files

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +2 -0
  2. .gitignore +20 -0
  3. LICENSE.md +661 -0
  4. README.md +60 -12
  5. batch.py +43 -0
  6. ckpt.jpg +0 -0
  7. config.yaml +349 -0
  8. doc/train_and_inference.markdown +210 -0
  9. flask_api.py +54 -0
  10. infer.py +98 -0
  11. infer_tools/__init__.py +0 -0
  12. infer_tools/infer_tool.py +343 -0
  13. infer_tools/slicer.py +158 -0
  14. inference.ipynb +0 -0
  15. modules/commons/__pycache__/common_layers.cpython-38.pyc +0 -0
  16. modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc +0 -0
  17. modules/commons/__pycache__/ssim.cpython-38.pyc +0 -0
  18. modules/commons/common_layers.py +671 -0
  19. modules/commons/espnet_positional_embedding.py +113 -0
  20. modules/commons/ssim.py +391 -0
  21. modules/fastspeech/__pycache__/fs2.cpython-38.pyc +0 -0
  22. modules/fastspeech/__pycache__/pe.cpython-38.pyc +0 -0
  23. modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc +0 -0
  24. modules/fastspeech/fs2.py +255 -0
  25. modules/fastspeech/pe.py +149 -0
  26. modules/fastspeech/tts_modules.py +364 -0
  27. modules/hifigan/__pycache__/hifigan.cpython-38.pyc +0 -0
  28. modules/hifigan/hifigan.py +365 -0
  29. modules/hifigan/mel_utils.py +80 -0
  30. modules/nsf_hifigan/__pycache__/env.cpython-38.pyc +0 -0
  31. modules/nsf_hifigan/__pycache__/models.cpython-38.pyc +0 -0
  32. modules/nsf_hifigan/__pycache__/nvSTFT.cpython-38.pyc +0 -0
  33. modules/nsf_hifigan/__pycache__/utils.cpython-38.pyc +0 -0
  34. modules/nsf_hifigan/env.py +15 -0
  35. modules/nsf_hifigan/models.py +549 -0
  36. modules/nsf_hifigan/nvSTFT.py +111 -0
  37. modules/nsf_hifigan/utils.py +67 -0
  38. modules/parallel_wavegan/__init__.py +0 -0
  39. modules/parallel_wavegan/__pycache__/__init__.cpython-38.pyc +0 -0
  40. modules/parallel_wavegan/layers/__init__.py +5 -0
  41. modules/parallel_wavegan/layers/__pycache__/__init__.cpython-38.pyc +0 -0
  42. modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-38.pyc +0 -0
  43. modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-38.pyc +0 -0
  44. modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-38.pyc +0 -0
  45. modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-38.pyc +0 -0
  46. modules/parallel_wavegan/layers/__pycache__/upsample.cpython-38.pyc +0 -0
  47. modules/parallel_wavegan/layers/causal_conv.py +56 -0
  48. modules/parallel_wavegan/layers/pqmf.py +129 -0
  49. modules/parallel_wavegan/layers/residual_block.py +129 -0
  50. modules/parallel_wavegan/layers/residual_stack.py +75 -0
.gitattributes CHANGED
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ results/test_output.wav filter=lfs diff=lfs merge=lfs -text
+ test_output.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,20 @@
+ .idea
+ *.pyc
+ __pycache__/
+ *.sh
+ local_tools/
+ *.ckpt
+ *.pth
+ infer_out/
+ *.onnx
+ data/
+ checkpoints/
+ processcmd.py
+ .vscode
+ WPy64-38100
+ Winpython64-3.8.10.0dot.exe
+ *.pkf
+ *.wav
+ *.json
+ *.flac
+ *.xmp
LICENSE.md ADDED
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ [The remaining lines of this file are the standard, unmodified text of the
+ GNU Affero General Public License v3 - the Preamble, Terms and Conditions
+ sections 0-17, and "How to Apply These Terms to Your New Programs" - as
+ published at <https://www.gnu.org/licenses/agpl-3.0.txt>.]
README.md CHANGED
@@ -1,12 +1,60 @@
- ---
- title: Model
- emoji: 💩
- colorFrom: red
- colorTo: gray
- sdk: gradio
- sdk_version: 3.15.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Diff-SVC
+ Singing Voice Conversion via diffusion model
+
+ This just replaces parselmouth with WORLD as the pitch estimator for inference (hopefully).
+
+ ## Updates:
+ >2022.12.4: The 44.1kHz vocoder is open for applications; 44.1kHz is now officially supported.\
+ 2022.11.28: Added the no_fs2 option, on by default, which streamlines part of the network, speeds up training and shrinks the model; it applies to newly trained models.\
+ 2022.11.23: Fixed a major bug that could resample the original ground-truth audio used for inference to 22.05kHz. We are very sorry for any impact; please check your test audio and use the updated code.\
+ 2022.11.22: Fixed many bugs, several of which seriously affected inference quality.\
+ 2022.11.20: Added input and saving support for most audio formats at inference time; no manual conversion with other software is needed.\
+ 2022.11.13: Fixed the epoch/steps display when loading a model after an interruption, added an on-disk cache for f0 processing, and added the support files for real-time voice-conversion inference.\
+ 2022.11.11: Fixed slice-duration errors, added 44.1kHz adaptation, and added contentvec support.\
+ 2022.11.4: Added mel-spectrogram saving.\
+ 2022.11.2: Integrated the new vocoder code and updated the parselmouth algorithm.\
+ 2022.10.29: Cleaned up the inference code and added automatic slicing of long audio.\
+ 2022.10.28: Migrated hubert's onnx inference to torch inference and tidied the inference logic.\
+ <font color=#FFA500>If you previously downloaded the onnx hubert model, re-download it and replace it with the pt model</font>; the config does not need to change. Direct GPU inference and preprocessing now work on a 1060 with 6 GB VRAM; see the docs for details.\
+ 2022.10.27: Updated the dependency files and removed redundant dependencies.\
+ 2022.10.27: Fixed a serious bug that made hubert run on the CPU even on GPU servers, slowing preprocessing and inference by 3-5x (training was unaffected).\
+ 2022.10.26: Fixed data preprocessed on Windows being unusable on Linux, and updated parts of the docs.\
+ 2022.10.25: Wrote detailed inference/training docs, refactored and consolidated parts of the code, and added support for ogg audio (no need to treat it differently from wav; just use it).\
+ 2022.10.24: Added training on custom datasets and streamlined the code.\
+ 2022.10.22: Finished training on the opencpop dataset and created the repository.
+
+ ## Notes:
+ >This repository was established for academic exchange, not for production use, and it is not responsible for any copyright issues arising from audio produced by its models.\
+ If you redistribute the code, or publicly publish any output of this project (including, but not limited to, video-site uploads), please credit the original author and the source of the code (this repository).\
+ If you want to use this project as part of any other undertaking, please contact and inform the author of this repository in advance. Thank you very much.
+
+ ## Inference:
+
+ >See ./inference.ipynb
+
+
+ ## Preprocessing:
+ ```
+ export PYTHONPATH=.
+ CUDA_VISIBLE_DEVICES=0 python preprocessing/binarize.py --config training/config.yaml
+ ```
+ ## Training:
+ ```
+ CUDA_VISIBLE_DEVICES=0 python run.py --config training/config.yaml --exp_name [your project name] --reset
+ ```
+ For the detailed training procedure and an introduction to the various parameters, see the [training and inference guide](./doc/train_and_inference.markdown)
+ ### Trained models
+ >This project has already been trained and tested on many datasets. Some ckpt files, demo audio, and the other files needed for inference and training can be downloaded from the QQ channel below.\
+ Scan this QR code with QQ (if you cannot join, try a more suitable network environment):
+ <img src="./ckpt.jpg" width=256/>
+ For English support, you can join this discord:
+
+ [![Discord](https://img.shields.io/discord/1044927142900809739?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/jvA5c2xzSE)
+
+ ## Acknowledgements
+ >This project is developed on top of [diffsinger](https://github.com/MoonInTheRiver/DiffSinger), [diffsinger (openvpi maintained version)](https://github.com/openvpi/DiffSinger), and [soft-vc](https://github.com/bshall/soft-vc).\
+ Many thanks as well to the openvpi members for their help during development and training.
+ >Note: this project has no connection with the paper of the same name, [DiffSVC](https://arxiv.org/abs/2105.13871); please do not confuse the two!
batch.py ADDED
@@ -0,0 +1,43 @@
+ import soundfile
+
+ from infer_tools import infer_tool
+ from infer_tools.infer_tool import Svc
+
+
+ def run_clip(svc_model, key, acc, use_pe, use_crepe, thre, use_gt_mel, add_noise_step, project_name='', f_name=None,
+              file_path=None, out_path=None):
+     # run one file through the model and write the result to ./singer_data
+     raw_audio_path = f_name
+     infer_tool.format_wav(raw_audio_path)
+     _f0_tst, _f0_pred, _audio = svc_model.infer(raw_audio_path, key=key, acc=acc, singer=True, use_pe=use_pe,
+                                                 use_crepe=use_crepe,
+                                                 thre=thre, use_gt_mel=use_gt_mel, add_noise_step=add_noise_step)
+     out_path = f'./singer_data/{f_name.split("/")[-1]}'
+     soundfile.write(out_path, _audio, 44100, 'PCM_16')
+
+
+ if __name__ == '__main__':
+     # project folder name, the one used during training
+     project_name = "firefox"
+     model_path = f'./checkpoints/{project_name}/clean_model_ckpt_steps_100000.ckpt'
+     config_path = f'./checkpoints/{project_name}/config.yaml'
+
+     # multiple wav/ogg files are supported; put them in the ./batch folder, with file extensions
+     file_names = infer_tool.get_end_file("./batch", "wav")
+     trans = [-6]  # pitch shift in semitones (positive or negative), one entry per file above; short lists are padded with the first value
+     # acceleration factor
+     accelerate = 50
+     hubert_gpu = True
+     cut_time = 30
+
+     # do not change anything below
+     infer_tool.mkdir(["./batch", "./singer_data"])
+     infer_tool.fill_a_to_b(trans, file_names)
+
+     model = Svc(project_name, config_path, hubert_gpu, model_path)
+     count = 0
+     for f_name, tran in zip(file_names, trans):
+         print(f_name)
+         run_clip(model, key=tran, acc=accelerate, use_crepe=False, thre=0.05, use_pe=False, use_gt_mel=False,
+                  add_noise_step=500, f_name=f_name, project_name=project_name)
+         count += 1
+         print(f"process:{round(count * 100 / len(file_names), 2)}%")
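Usage note (editorial, not part of the file above): as written, batch.py gathers the wav files under ./batch (creating ./batch and ./singer_data if needed), pads the trans list so every file gets a pitch shift, converts each clip with the checkpoint named by project_name at 50x acceleration, and writes 16-bit PCM results with the original file names to ./singer_data. Run it from the repository root with python batch.py after pointing project_name, model_path and config_path at your own checkpoint.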
ckpt.jpg ADDED
config.yaml ADDED
@@ -0,0 +1,349 @@
+ K_step: 1000
+ accumulate_grad_batches: 1
+ audio_num_mel_bins: 80
+ audio_sample_rate: 24000
+ binarization_args:
+   shuffle: false
+   with_align: true
+   with_f0: true
+   with_hubert: true
+   with_spk_embed: false
+   with_wav: false
+ binarizer_cls: preprocessing.SVCpre.SVCBinarizer
+ binary_data_dir: data/binary/atri
+ check_val_every_n_epoch: 10
+ choose_test_manually: false
+ clip_grad_norm: 1
+ config_path: training/config.yaml
+ content_cond_steps: []
+ cwt_add_f0_loss: false
+ cwt_hidden_size: 128
+ cwt_layers: 2
+ cwt_loss: l1
+ cwt_std_scale: 0.8
+ datasets:
+ - opencpop
+ debug: false
+ dec_ffn_kernel_size: 9
+ dec_layers: 4
+ decay_steps: 30000
+ decoder_type: fft
+ dict_dir: ''
+ diff_decoder_type: wavenet
+ diff_loss_type: l2
+ dilation_cycle_length: 4
+ dropout: 0.1
+ ds_workers: 4
+ dur_enc_hidden_stride_kernel:
+ - 0,2,3
+ - 0,2,3
+ - 0,1,3
+ dur_loss: mse
+ dur_predictor_kernel: 3
+ dur_predictor_layers: 5
+ enc_ffn_kernel_size: 9
+ enc_layers: 4
+ encoder_K: 8
+ encoder_type: fft
+ endless_ds: False
+ f0_bin: 256
+ f0_max: 1100.0
+ f0_min: 50.0
+ ffn_act: gelu
+ ffn_padding: SAME
+ fft_size: 512
+ fmax: 12000
+ fmin: 30
+ fs2_ckpt: ''
+ gaussian_start: true
+ gen_dir_name: ''
+ gen_tgt_spk_id: -1
+ hidden_size: 256
+ hop_size: 128
+ hubert_gpu: true
+ hubert_path: checkpoints/hubert/hubert_soft.pt
+ infer: false
+ keep_bins: 80
+ lambda_commit: 0.25
+ lambda_energy: 0.0
+ lambda_f0: 1.0
+ lambda_ph_dur: 0.3
+ lambda_sent_dur: 1.0
+ lambda_uv: 1.0
+ lambda_word_dur: 1.0
+ load_ckpt: ''
+ log_interval: 100
+ loud_norm: false
+ lr: 5.0e-05
+ max_beta: 0.02
+ max_epochs: 3000
+ max_eval_sentences: 1
+ max_eval_tokens: 60000
+ max_frames: 42000
+ max_input_tokens: 60000
+ max_sentences: 24
+ max_tokens: 128000
+ max_updates: 1000000
+ mel_loss: ssim:0.5|l1:0.5
+ mel_vmax: 1.5
+ mel_vmin: -6.0
+ min_level_db: -120
+ norm_type: gn
+ num_ckpt_keep: 10
+ num_heads: 2
+ num_sanity_val_steps: 1
+ num_spk: 1
+ num_test_samples: 0
+ num_valid_plots: 10
+ optimizer_adam_beta1: 0.9
+ optimizer_adam_beta2: 0.98
+ out_wav_norm: false
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
+ pe_enable: false
+ perform_enhance: true
+ pitch_ar: false
+ pitch_enc_hidden_stride_kernel:
+ - 0,2,5
+ - 0,2,5
+ - 0,2,5
+ pitch_extractor: parselmouth
+ pitch_loss: l2
+ pitch_norm: log
+ pitch_type: frame
+ pndm_speedup: 10
+ pre_align_args:
+   allow_no_txt: false
+   denoise: false
+   forced_align: mfa
+   txt_processor: zh_g2pM
+   use_sox: true
+   use_tone: false
+ pre_align_cls: data_gen.singing.pre_align.SingingPreAlign
+ predictor_dropout: 0.5
+ predictor_grad: 0.1
+ predictor_hidden: -1
+ predictor_kernel: 5
+ predictor_layers: 5
+ prenet_dropout: 0.5
+ prenet_hidden_size: 256
+ pretrain_fs_ckpt: pretrain/nyaru/model_ckpt_steps_60000.ckpt
+ processed_data_dir: xxx
+ profile_infer: false
+ raw_data_dir: data/raw/atri
+ ref_norm_layer: bn
+ rel_pos: true
+ reset_phone_dict: true
+ residual_channels: 256
+ residual_layers: 20
+ save_best: false
+ save_ckpt: true
+ save_codes:
+ - configs
+ - modules
+ - src
+ - utils
+ save_f0: true
+ save_gt: false
+ schedule_type: linear
+ seed: 1234
+ sort_by_len: true
+ speaker_id: atri
+ spec_max:
+ - 0.2987259328365326
+ - 0.29721200466156006
+ - 0.23978209495544434
+ - 0.208412766456604
+ - 0.25777050852775574
+ - 0.2514476478099823
+ - 0.1129382848739624
+ - 0.03415697440505028
+ - 0.09860049188137054
+ - 0.10637332499027252
+ - 0.13287633657455444
+ - 0.19744250178337097
+ - 0.10040587931871414
+ - 0.13735432922840118
+ - 0.15107455849647522
+ - 0.17196381092071533
+ - 0.08298977464437485
+ - 0.0632769986987114
+ - 0.02723858878016472
+ - -0.001819317927584052
+ - -0.029565516859292984
+ - -0.023574354127049446
+ - -0.01633293740451336
+ - 0.07143621146678925
+ - 0.021580500528216362
+ - 0.07257916033267975
+ - -0.024349519982933998
+ - -0.06165708228945732
+ - -0.10486568510532379
+ - -0.1363687664270401
+ - -0.13333871960639954
+ - -0.13955898582935333
+ - -0.16613495349884033
+ - -0.17636367678642273
+ - -0.2786925733089447
+ - -0.22967253625392914
+ - -0.31897130608558655
+ - -0.18007366359233856
+ - -0.29366692900657654
+ - -0.2871025800704956
+ - -0.36748355627059937
+ - -0.46071451902389526
+ - -0.5464922189712524
+ - -0.5719417333602905
+ - -0.6020897626876831
+ - -0.6239874958992004
+ - -0.5653440952301025
+ - -0.6508013606071472
+ - -0.628247857093811
+ - -0.6809687614440918
+ - -0.569259762763977
+ - -0.5423558354377747
+ - -0.5811785459518433
+ - -0.5359002351760864
+ - -0.6565515398979187
+ - -0.7143737077713013
+ - -0.8502675890922546
+ - -0.7979224920272827
+ - -0.7110578417778015
+ - -0.763409435749054
+ - -0.7984790802001953
+ - -0.6927220821380615
+ - -0.658117413520813
+ - -0.7486468553543091
+ - -0.5949879884719849
+ - -0.7494576573371887
+ - -0.7400822639465332
+ - -0.6822793483734131
+ - -0.7773582339286804
+ - -0.661201536655426
+ - -0.791329026222229
+ - -0.8982341885566711
+ - -0.8736728429794312
+ - -0.7701027393341064
+ - -0.8490535616874695
+ - -0.7479292154312134
+ - -0.9320166110992432
+ - -1.2862414121627808
+ - -2.8936190605163574
+ - -2.924229860305786
+ spec_min:
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -6.0
+ - -5.999454021453857
+ - -5.8822431564331055
+ - -5.892064571380615
+ - -5.882402420043945
+ - -5.786972522735596
+ - -5.746835231781006
+ - -5.8594512939453125
+ - -5.7389445304870605
+ - -5.718059539794922
+ - -5.779720306396484
+ - -5.801984786987305
+ - -6.0
+ - -6.0
+ spk_cond_steps: []
+ stop_token_weight: 5.0
+ task_cls: training.task.SVC_task.SVCTask
+ test_ids: []
+ test_input_dir: ''
+ test_num: 0
+ test_prefixes:
+ - test
+ test_set_name: test
+ timesteps: 1000
+ train_set_name: train
+ use_crepe: true
+ use_denoise: false
+ use_energy_embed: false
+ use_gt_dur: false
+ use_gt_f0: false
+ use_midi: false
+ use_nsf: true
+ use_pitch_embed: true
+ use_pos_embed: true
+ use_spk_embed: false
+ use_spk_id: false
+ use_split_spk_id: false
+ use_uv: false
+ use_var_enc: false
+ use_vec: false
+ val_check_interval: 2000
+ valid_num: 0
+ valid_set_name: valid
+ vocoder: network.vocoders.hifigan.HifiGAN
+ vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128
+ warmup_updates: 2000
+ wav2spec_eps: 1e-6
+ weight_decay: 0
+ win_size: 512
+ work_dir: checkpoints/atri
+ no_fs2: false
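The file above is plain, flat YAML, so it can be sanity-checked with any YAML parser before preprocessing or training starts. A minimal standalone sketch (assuming PyYAML is installed; this is not the project's own configuration loader, just an illustration of the keys shown above):

```
# Illustrative only: parse the flat config and print the values a run depends on.
import yaml

with open("config.yaml", "r", encoding="utf-8") as f:
    hparams = yaml.safe_load(f)

print(hparams["audio_sample_rate"], hparams["hop_size"], hparams["fft_size"])  # 24000 128 512
print(len(hparams["spec_min"]), len(hparams["spec_max"]))  # 80 and 80, one entry per mel bin

# Paths that preprocessing/training/inference expect to exist; check them before starting.
for key in ("hubert_path", "vocoder_ckpt", "raw_data_dir", "binary_data_dir", "work_dir"):
    print(f"{key}: {hparams[key]}")
```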
doc/train_and_inference.markdown ADDED
@@ -0,0 +1,210 @@
 
1
+ # Diff-SVC (train/inference by yourself)
2
+ ## 0. Environment setup
3
+ >Note: the requirements files have been updated and are now split into 3 versions; pick whichever you prefer.\
4
+ 1. requirements.txt is the original full environment this repo was tested with (Torch 1.12.1+cu113). You can pip install it directly, or remove the pytorch-related entries (torch/torchvision) first and use your own torch environment
5
+ ```
6
+ pip install -r requirements.txt
7
+ ```
8
+ >2. (Recommended) requirements_short.txt is a manually trimmed version of the environment above, without torch itself; you can also simply run
9
+ ```
10
+ pip install -r requirements_short.txt
11
+ ```
12
+ >3. The repository root also contains requirements.png, a dependency list compiled by @三千 that was verified on a certain brand of cloud server. Its torch version is no longer compatible with the current code, but the other package versions are still a useful reference. Many thanks.
13
+
14
+ ## 1. Inference
15
+ >Use inference.ipynb in the repository root, or @小狼's infer.py as adapted by the author.\
16
+ Modify the following parameters in the first block:
17
+ ```
18
+ config_path='path of the config.yaml inside the checkpoints archive'
19
+ e.g. './checkpoints/nyaru/config.yaml'
20
+ config and checkpoints correspond one-to-one; do not use a different config
21
+
22
+ project_name='name of this project'
23
+ e.g. 'nyaru'
24
+
25
+ model_path='full path of the ckpt file'
26
+ e.g. './checkpoints/nyaru/model_ckpt_steps_112000.ckpt'
27
+
28
+ hubert_gpu=True
29
+ whether to run hubert (one module of the model) on the GPU during inference; it does not affect the rest of the model
30
+ the current version has greatly reduced hubert's GPU memory usage; full inference fits on a 1060 6G card, so there is no need to turn it off anymore.
31
+ automatic slicing of long audio is also supported now (in both the ipynb and infer.py); audio longer than 30s is automatically sliced at silent parts, thanks to @小狼's code
32
+
33
+ ```
34
+ ### Adjustable parameters:
35
+ ```
36
+ wav_fn='xxx.wav'  # path of the input audio, by default in the project root
37
+
38
+ use_crepe=True
39
+ # crepe is an F0 algorithm with good quality but low speed; set it to False to use parselmouth, which is slightly worse but faster
40
+
41
+ thre=0.05
42
+ # crepe's noise filtering threshold; raise it a little if the source audio is clean, keep or lower it if the audio is noisy. It has no effect when the option above is set to False
43
+
44
+ pndm_speedup=20
45
+ # speed-up factor of the accelerated inference algorithm. The default is 1000 steps, so 10 here means synthesizing with only 100 steps, which is a middle-of-the-road value. It can go as high as 50x (20-step synthesis) with no obvious quality loss; larger values may cost noticeable quality. Note: if use_gt_mel below is enabled, keep this value smaller than add_noise_step and preferably a divisor of it
46
+
47
+ key=0
48
+ # pitch-shift parameter, default 0 (not 1!). The source audio is raised by key semitones before synthesis; e.g. for male-to-female conversion you can use 8 or 12 (12 is a full octave up)
49
+
50
+ use_pe=True
51
+ # F0 extraction algorithm used when synthesizing audio from the mel spectrogram; set to False to use the F0 of the source audio\
52
+ True and False give slightly different results; True is usually better, though not always, and the effect on synthesis speed is negligible\
53
+ (this can be chosen freely regardless of what key is set to; it is independent)\
54
+ not supported at 44.1kHz: it is turned off automatically there, and leaving it on simply does nothing (no error)
55
+
56
+ use_gt_mel=False
57
+ # similar to the img2img feature of AI image generation: if enabled, the output audio is a blend of the input voice and the target speaker's voice, with the mixing ratio controlled by the next parameter
58
+ Note!!!: if this is set to True, make sure key is 0; pitch shifting is not supported in this mode
59
+
60
+ add_noise_step=500
61
+ # related to the previous parameter; it controls the ratio of the two voices. 1 is entirely the source voice, 1000 is entirely the target voice; an audibly even blend sits around 300 (the scale is not linear). If this value is set very low, you can lower the pndm speed-up factor to improve synthesis quality
62
+
63
+ wav_gen='yyy.wav'  # path of the output audio, by default in the project root; change the extension to change the saved file type
64
+ ```
65
+ If you use infer.py, the changes are similar: edit the section under __name__=='__main__', then run from the repository root\
66
+ python infer.py\
67
+ With this approach, put the source audio in the raw folder and look for the results in the results folder
68
+ ## 2. Data preprocessing and training
69
+ ### 2.1 Preparing the data
70
+ >Audio data in wav and ogg format is currently supported; a sample rate above 24kHz is preferred, and the program handles sample rate and channel conversion automatically. The sample rate must not be below 16kHz (which is rarely an issue).\
71
+ The audio should be sliced into short clips of roughly 5-15s; there is no strict length requirement, but avoid clips that are too long or too short. The audio must be the clean, dry vocal of the target speaker only, with no background music or other voices, and ideally without heavy reverb. If the audio was processed (e.g. vocal separation), make sure the resulting quality is as high as possible.\
72
+ Only single-speaker training is supported for now; try to have a total of 3h or more. No extra annotation is needed: just put the audio files under the raw_data_dir described below. The directory structure inside is up to you; the program finds the files it needs on its own.
73
+
74
+ ### 2.2 Editing the hyperparameter config
75
+ >First make a backup of config.yaml (this file is for the 24kHz vocoder; use config_nsf.yaml for the 44.1kHz vocoder), then edit it\
76
+ The parameters you are likely to touch are listed below (using nyaru as the project name):
77
+ ```
78
+ K_step: 1000
79
+ # total number of diffusion steps; changing this is not recommended
80
+
81
+ binary_data_dir: data/binary/nyaru
82
+ where the preprocessed data is stored: change the last path component to your project name
83
+
84
+ config_path: training/config.yaml
85
+ the path of this yaml file itself; data is written into it during preprocessing, so be sure to set this to the full path where this yaml file will live
86
+
87
+ choose_test_manually: false
88
+ manually choose the test set; off by default, in which case 5 audio clips are picked at random as the test set.
89
+ If set to true, fill test_prefixes: with filename prefixes of the test data; files whose names start with a matching prefix become the test set
90
+ This is a list and may contain several prefixes, e.g.:
91
+ test_prefixes:
92
+ - test
93
+ - aaaa
94
+ - 5012
95
+ - speaker1024
96
+ Important: the test set must *not* be empty; to avoid surprises it is best not to choose the test set manually
97
+
98
+ endless_ds: False
99
+ if your dataset is very small and each epoch finishes very quickly, turn this on: 1000 normal epochs are then counted as one epoch
100
+
101
+ hubert_path: checkpoints/hubert/hubert.pt
102
+ where the hubert model is stored; make sure the path is correct. After unpacking the checkpoints archive it is already right and does not need changing. Inference now uses the torch version
103
+ hubert_gpu: True
104
+ whether to run hubert (one module of the model) on the GPU during preprocessing; if off it runs on the CPU, which is noticeably slower. Whether hubert uses the GPU at inference time is controlled separately in inference and is not affected by this. Since the switch to the torch version, preprocessing fits on a 1060 6G GPU, and inferring audio under 1 minute stays within its memory, so normally there is no need to turn this off.
105
+
106
+ lr: 0.0008
107
+ # initial learning rate: this value corresponds to a batch size of 88; if your batch size is smaller, you can lower it a bit
108
+
109
+ decay_steps: 20000
110
+ the learning rate is halved every 20000 steps; if your batch size is small, increase this value
111
+
112
+ # for a batch size around 30-40, lr=0.0004 and decay_steps=40000 are recommended
113
+
114
+ max_frames: 42000
115
+ max_input_tokens: 6000
116
+ max_sentences: 88
117
+ max_tokens: 128000
118
+ # the batch size is computed dynamically from these parameters; if you are unsure what they mean, change only max_sentences, which is a hard cap on the batch size, to avoid running out of GPU memory
119
+
120
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
121
+ # path of the pe model; make sure the file exists. See the inference section for what it does
122
+
123
+ raw_data_dir: data/raw/nyaru
124
+ # where the raw, unprocessed data lives; put the original wav data in this directory. The internal file structure does not matter, it is traversed automatically
125
+
126
+ residual_channels: 384
127
+ residual_layers: 20
128
+ # a pair of parameters controlling the size of the core network: larger means more parameters and slower training, but not necessarily better results. For a larger dataset the first one can be raised to 512. Feel free to experiment, but leave them alone if unsure.
129
+
130
+ speaker_id: nyaru
131
+ # name of the speaker being trained; only single-speaker training is supported, fill it in here (purely cosmetic, no real effect)
132
+
133
+ use_crepe: true
134
+ # use crepe to extract F0 during preprocessing; turn it on for quality, off for speed
135
+
136
+ val_check_interval: 2000
137
+ # run inference on the test set and save a ckpt every 2000 steps
138
+
139
+ vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128
140
+ # at 24kHz this is the directory of the corresponding vocoder; at 44.1kHz it is the vocoder's file name. Take care not to mix them up
141
+
142
+ work_dir: checkpoints/nyaru
143
+ # change the last path component to your project name (you can also delete it or leave it empty to have it generated automatically, just do not put something random)
144
+ no_fs2: true
145
+ # a slimmed-down network encoder that reduces model size and speeds up training, with no direct evidence so far that it hurts network performance. On by default
146
+
147
+ ```
148
+ >If you do not know what a parameter does, do not change it, even if you think you can guess from its name.
149
+
150
+ ### 2.3 Data preprocessing
151
+ Run the following commands in the diff-svc directory:\
152
+ #windows
153
+ ```
154
+ set PYTHONPATH=.
155
+ set CUDA_VISIBLE_DEVICES=0
156
+ python preprocessing/binarize.py --config training/config.yaml
157
+ ```
158
+ #linux
159
+ ```
160
+ export PYTHONPATH=.
161
+ CUDA_VISIBLE_DEVICES=0 python preprocessing/binarize.py --config training/config.yaml
162
+ ```
163
+ For preprocessing, @小狼 has prepared code that can process hubert and the other features in separate passes: if the normal run exceeds GPU memory, first run python ./network/hubert/hubert_model.py
164
+ and then run the normal command; the hubert features computed in advance will be recognized
165
+ ### 2.4 Training
166
+ #windows
167
+ ```
168
+ set CUDA_VISIBLE_DEVICES=0
169
+ python run.py --config training/config.yaml --exp_name nyaru --reset
170
+ ```
171
+ #linux
172
+ ```
173
+ CUDA_VISIBLE_DEVICES=0 python run.py --config training/config.yaml --exp_name nyaru --reset
174
+ ```
175
+ >Change exp_name to your project name and adjust the config path; make sure it is the same config file that was used for preprocessing\
176
+ *Important*: after training, if preprocessing was not done locally, you must download the config file along with the ckpt file and use that downloaded config for inference; do not use the copy you originally uploaded, because preprocessing writes data into the config. The config used at inference time must be the very same one used for preprocessing.
177
+
178
+
179
+ ### 2.5 Possible problems:
180
+ >2.5.1 'Upsample' object has no attribute 'recompute_scale_factor'\
181
+ This issue was seen with the torch build for cuda 11.3. If you hit it, locate torch.nn.modules.upsampling.py among your python packages (in a conda environment it is under your conda directory\envs\your_env\Lib\site-packages\torch\nn\modules\upsampling.py), for instance via your IDE's go-to-definition, and change lines 153-154 from
182
+ ```
183
+ return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners,recompute_scale_factor=self.recompute_scale_factor)
184
+ ```
185
+ >to
186
+ ```
187
+ return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)
188
+ # recompute_scale_factor=self.recompute_scale_factor)
189
+ ```
190
+ >2.5.2 no module named 'utils'\
191
+ Set the following in your runtime environment (e.g. a colab notebook):
192
+ ```
193
+ import os
194
+ os.environ['PYTHONPATH']='.'
195
+ !CUDA_VISIBLE_DEVICES=0 python preprocessing/binarize.py --config training/config.yaml
196
+ ```
197
+ Make sure to run this from the root of the project folder
198
+ >2.5.3 cannot load library 'libsndfile.so'\
199
+ An error that may occur on linux; run the following command
200
+ ```
201
+ apt-get install libsndfile1 -y
202
+ ```
203
+ >2.5.4 cannot load import 'consume_prefix_in_state_dict_if_present'\
204
+ Your torch version is too old; switch to a newer torch
205
+
206
+ >2.5.5 Preprocessing is very slow\
207
+ Check whether use_crepe is enabled in the config; turning it off speeds things up considerably.\
208
+ Check whether hubert_gpu is enabled in the config.
209
+
210
+ If you run into other problems, ask in the QQ channel or the discord channel.
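For quick reference, the notebook parameters described in section 1 boil down to plain Python assignments like the sketch below; every value is just the example used in this guide (nyaru paths, default thresholds), not a requirement:

```python
# Example values from the guide above; adjust to your own project.
config_path = './checkpoints/nyaru/config.yaml'   # config matching the ckpt
project_name = 'nyaru'
model_path = './checkpoints/nyaru/model_ckpt_steps_112000.ckpt'
hubert_gpu = True

wav_fn = 'xxx.wav'        # input audio in the project root
use_crepe = True          # slower but better F0 extraction
thre = 0.05               # crepe noise threshold
pndm_speedup = 20         # 1000 / 20 = 50 diffusion steps
key = 0                   # transpose in semitones
use_pe = True             # predict F0 from the generated mel (24kHz only)
use_gt_mel = False        # "img2img"-style blending with the source mel
add_noise_step = 500      # blend ratio when use_gt_mel is enabled
wav_gen = 'yyy.wav'       # output path
```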
flask_api.py ADDED
@@ -0,0 +1,54 @@
 
 
1
+ import io
2
+ import logging
3
+
4
+ import librosa
5
+ import soundfile
6
+ from flask import Flask, request, send_file
7
+ from flask_cors import CORS
8
+
9
+ from infer_tools.infer_tool import Svc
10
+ from utils.hparams import hparams
11
+
12
+ app = Flask(__name__)
13
+
14
+ CORS(app)
15
+
16
+ logging.getLogger('numba').setLevel(logging.WARNING)
17
+
18
+
19
+ @app.route("/voiceChangeModel", methods=["POST"])
20
+ def voice_change_model():
21
+ request_form = request.form
22
+ wave_file = request.files.get("sample", None)
23
+ # pitch-shift information
24
+ f_pitch_change = float(request_form.get("fPitchChange", 0))
25
+ # sample rate required by the DAW
26
+ daw_sample = int(float(request_form.get("sampleRate", 0)))
27
+ speaker_id = int(float(request_form.get("sSpeakId", 0)))
28
+ # get the wav file from the http request and convert it
29
+ input_wav_path = io.BytesIO(wave_file.read())
30
+ # model inference
31
+ _f0_tst, _f0_pred, _audio = model.infer(input_wav_path, key=f_pitch_change, acc=accelerate, use_pe=False,
32
+ use_crepe=False)
33
+ tar_audio = librosa.resample(_audio, hparams["audio_sample_rate"], daw_sample)
34
+ # return the audio
35
+ out_wav_path = io.BytesIO()
36
+ soundfile.write(out_wav_path, tar_audio, daw_sample, format="wav")
37
+ out_wav_path.seek(0)
38
+ return send_file(out_wav_path, download_name="temp.wav", as_attachment=True)
39
+
40
+
41
+ if __name__ == '__main__':
42
+ # project folder name, the one used during training
43
+ project_name = "firefox"
44
+ model_path = f'./checkpoints/{project_name}/model_ckpt_steps_188000.ckpt'
45
+ config_path = f'./checkpoints/{project_name}/config.yaml'
46
+
47
+ # speed-up factor
48
+ accelerate = 50
49
+ hubert_gpu = True
50
+
51
+ model = Svc(project_name, config_path, hubert_gpu, model_path)
52
+
53
+ # this matches the vst plugin; changing it is not recommended
54
+ app.run(port=6842, host="0.0.0.0", debug=False, threaded=False)
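flask_api.py above exposes a single POST endpoint consumed by the DAW/VST plugin. A minimal client sketch for the same endpoint, assuming the server is running locally on the default port 6842 and that a test.wav exists (the requests library is an extra dependency, not pinned by this commit):

```python
import io

import requests
import soundfile

# Sketch of a client for the /voiceChangeModel endpoint defined above.
# Field names mirror the ones read by voice_change_model(); values are examples.
with open("test.wav", "rb") as f:
    files = {"sample": ("test.wav", f, "audio/wav")}
    data = {"fPitchChange": 0, "sampleRate": 44100, "sSpeakId": 0}
    resp = requests.post("http://127.0.0.1:6842/voiceChangeModel", files=files, data=data)

audio, sr = soundfile.read(io.BytesIO(resp.content))
print(sr, audio.shape)  # returned wav, resampled to the requested DAW sample rate
```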
infer.py ADDED
@@ -0,0 +1,98 @@
 
 
1
+ import io
2
+ import time
3
+ from pathlib import Path
4
+
5
+ import librosa
6
+ import numpy as np
7
+ import soundfile
8
+
9
+ from infer_tools import infer_tool
10
+ from infer_tools import slicer
11
+ from infer_tools.infer_tool import Svc
12
+ from utils.hparams import hparams
13
+
14
+ chunks_dict = infer_tool.read_temp("./infer_tools/new_chunks_temp.json")
15
+
16
+
17
+ def run_clip(svc_model, key, acc, use_pe, use_crepe, thre, use_gt_mel, add_noise_step, project_name='', f_name=None,
18
+ file_path=None, out_path=None, slice_db=-40,**kwargs):
19
+ print(f'code version:2022-12-04')
20
+ use_pe = use_pe if hparams['audio_sample_rate'] == 24000 else False
21
+ if file_path is None:
22
+ raw_audio_path = f"./raw/{f_name}"
23
+ clean_name = f_name[:-4]
24
+ else:
25
+ raw_audio_path = file_path
26
+ clean_name = str(Path(file_path).name)[:-4]
27
+ infer_tool.format_wav(raw_audio_path)
28
+ wav_path = Path(raw_audio_path).with_suffix('.wav')
29
+ global chunks_dict
30
+ audio, sr = librosa.load(wav_path, mono=True,sr=None)
31
+ wav_hash = infer_tool.get_md5(audio)
32
+ if wav_hash in chunks_dict.keys():
33
+ print("load chunks from temp")
34
+ chunks = chunks_dict[wav_hash]["chunks"]
35
+ else:
36
+ chunks = slicer.cut(wav_path, db_thresh=slice_db)
37
+ chunks_dict[wav_hash] = {"chunks": chunks, "time": int(time.time())}
38
+ infer_tool.write_temp("./infer_tools/new_chunks_temp.json", chunks_dict)
39
+ audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
40
+
41
+ count = 0
42
+ f0_tst = []
43
+ f0_pred = []
44
+ audio = []
45
+ for (slice_tag, data) in audio_data:
46
+ print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
47
+ length = int(np.ceil(len(data) / audio_sr * hparams['audio_sample_rate']))
48
+ raw_path = io.BytesIO()
49
+ soundfile.write(raw_path, data, audio_sr, format="wav")
50
+ if hparams['debug']:
51
+ print(np.mean(data), np.var(data))
52
+ raw_path.seek(0)
53
+ if slice_tag:
54
+ print('jump empty segment')
55
+ _f0_tst, _f0_pred, _audio = (
56
+ np.zeros(int(np.ceil(length / hparams['hop_size']))), np.zeros(int(np.ceil(length / hparams['hop_size']))),
57
+ np.zeros(length))
58
+ else:
59
+ _f0_tst, _f0_pred, _audio = svc_model.infer(raw_path, key=key, acc=acc, use_pe=use_pe, use_crepe=use_crepe,
60
+ thre=thre, use_gt_mel=use_gt_mel, add_noise_step=add_noise_step)
61
+ fix_audio = np.zeros(length)
62
+ fix_audio[:] = np.mean(_audio)
63
+ fix_audio[:len(_audio)] = _audio[0 if len(_audio)<len(fix_audio) else len(_audio)-len(fix_audio):]
64
+ f0_tst.extend(_f0_tst)
65
+ f0_pred.extend(_f0_pred)
66
+ audio.extend(list(fix_audio))
67
+ count += 1
68
+ if out_path is None:
69
+ out_path = f'./results/{clean_name}_{key}key_{project_name}_{hparams["residual_channels"]}_{hparams["residual_layers"]}_{int(step / 1000)}k_{accelerate}x.{kwargs["format"]}'
70
+ soundfile.write(out_path, audio, hparams["audio_sample_rate"], 'PCM_16',format=out_path.split('.')[-1])
71
+ return np.array(f0_tst), np.array(f0_pred), audio
72
+
73
+
74
+ if __name__ == '__main__':
75
+ # project folder name, the one used during training
76
+ project_name = "yilanqiu"
77
+ model_path = f'./checkpoints/{project_name}/model_ckpt_steps_246000.ckpt'
78
+ config_path = f'./checkpoints/{project_name}/config.yaml'
79
+
80
+ # multiple wav/ogg files are supported; put them in the raw folder, with extensions
81
+ file_names = ["青花瓷.wav"]
82
+ trans = [0]  # pitch adjustment in semitones, positive or negative; one entry per file above, missing entries are padded with the first value
83
+ # speed-up factor
84
+ accelerate = 20
85
+ hubert_gpu = True
86
+ format='flac'
87
+ step = int(model_path.split("_")[-1].split(".")[0])
88
+
89
+ # do not change anything below
90
+ infer_tool.mkdir(["./raw", "./results"])
91
+ infer_tool.fill_a_to_b(trans, file_names)
92
+
93
+ model = Svc(project_name, config_path, hubert_gpu, model_path)
94
+ for f_name, tran in zip(file_names, trans):
95
+ if "." not in f_name:
96
+ f_name += ".wav"
97
+ run_clip(model, key=tran, acc=accelerate, use_crepe=True, thre=0.05, use_pe=True, use_gt_mel=False,
98
+ add_noise_step=500, f_name=f_name, project_name=project_name, format=format)
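The __main__ block above works from a hard-coded file_names list in ./raw, but run_clip also accepts file_path/out_path directly. A small variation, shown as a sketch that reuses the model, accelerate and project_name objects set up above (the glob pattern and output naming are assumptions), batch-converts every wav found in ./raw:

```python
from glob import glob
from pathlib import Path

# Sketch: convert every wav in ./raw using explicit file_path/out_path arguments.
for wav in glob("./raw/*.wav"):
    name = Path(wav).stem
    out = f"./results/{name}_{project_name}.flac"
    run_clip(model, key=0, acc=accelerate, use_crepe=True, thre=0.05, use_pe=True,
             use_gt_mel=False, add_noise_step=500, file_path=wav, out_path=out,
             project_name=project_name, format='flac')
```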
infer_tools/__init__.py ADDED
File without changes
infer_tools/infer_tool.py ADDED
@@ -0,0 +1,343 @@
 
 
1
+ import hashlib
2
+ import json
3
+ import os
4
+ import time
5
+ from io import BytesIO
6
+ from pathlib import Path
7
+
8
+ import librosa
9
+ import numpy as np
10
+ import soundfile
11
+ import torch
12
+
13
+ import utils
14
+ from modules.fastspeech.pe import PitchExtractor
15
+ from network.diff.candidate_decoder import FFT
16
+ from network.diff.diffusion import GaussianDiffusion
17
+ from network.diff.net import DiffNet
18
+ from network.vocoders.base_vocoder import VOCODERS, get_vocoder_cls
19
+ from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe, get_pitch_world
20
+ from preprocessing.hubertinfer import Hubertencoder
21
+ from utils.hparams import hparams, set_hparams
22
+ from utils.pitch_utils import denorm_f0, norm_interp_f0
23
+
24
+ if os.path.exists("chunks_temp.json"):
25
+ os.remove("chunks_temp.json")
26
+
27
+
28
+ def read_temp(file_name):
29
+ if not os.path.exists(file_name):
30
+ with open(file_name, "w") as f:
31
+ f.write(json.dumps({"info": "temp_dict"}))
32
+ return {}
33
+ else:
34
+ try:
35
+ with open(file_name, "r") as f:
36
+ data = f.read()
37
+ data_dict = json.loads(data)
38
+ if os.path.getsize(file_name) > 50 * 1024 * 1024:
39
+ f_name = file_name.split("/")[-1]
40
+ print(f"clean {f_name}")
41
+ for wav_hash in list(data_dict.keys()):
42
+ if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
43
+ del data_dict[wav_hash]
44
+ except Exception as e:
45
+ print(e)
46
+ print(f"{file_name} error,auto rebuild file")
47
+ data_dict = {"info": "temp_dict"}
48
+ return data_dict
49
+
50
+
51
+ f0_dict = read_temp("./infer_tools/f0_temp.json")
52
+
53
+
54
+ def write_temp(file_name, data):
55
+ with open(file_name, "w") as f:
56
+ f.write(json.dumps(data))
57
+
58
+
59
+ def timeit(func):
60
+ def run(*args, **kwargs):
61
+ t = time.time()
62
+ res = func(*args, **kwargs)
63
+ print('executing \'%s\' took %.3fs' % (func.__name__, time.time() - t))
64
+ return res
65
+
66
+ return run
67
+
68
+
69
+ def format_wav(audio_path):
70
+ if Path(audio_path).suffix=='.wav':
71
+ return
72
+ raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True,sr=None)
73
+ soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)
74
+
75
+
76
+ def fill_a_to_b(a, b):
77
+ if len(a) < len(b):
78
+ for _ in range(0, len(b) - len(a)):
79
+ a.append(a[0])
80
+
81
+
82
+ def get_end_file(dir_path, end):
83
+ file_lists = []
84
+ for root, dirs, files in os.walk(dir_path):
85
+ files = [f for f in files if f[0] != '.']
86
+ dirs[:] = [d for d in dirs if d[0] != '.']
87
+ for f_file in files:
88
+ if f_file.endswith(end):
89
+ file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
90
+ return file_lists
91
+
92
+
93
+ def mkdir(paths: list):
94
+ for path in paths:
95
+ if not os.path.exists(path):
96
+ os.mkdir(path)
97
+
98
+
99
+ def get_md5(content):
100
+ return hashlib.new("md5", content).hexdigest()
101
+
102
+
103
+ class Svc:
104
+ def __init__(self, project_name, config_name, hubert_gpu, model_path):
105
+ self.project_name = project_name
106
+ self.DIFF_DECODERS = {
107
+ 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
108
+ 'fft': lambda hp: FFT(
109
+ hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
110
+ }
111
+
112
+ self.model_path = model_path
113
+ self.dev = torch.device("cuda")
114
+
115
+ self._ = set_hparams(config=config_name, exp_name=self.project_name, infer=True,
116
+ reset=True,
117
+ hparams_str='',
118
+ print_hparams=False)
119
+
120
+ self.mel_bins = hparams['audio_num_mel_bins']
121
+ self.model = GaussianDiffusion(
122
+ phone_encoder=Hubertencoder(hparams['hubert_path']),
123
+ out_dims=self.mel_bins, denoise_fn=self.DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
124
+ timesteps=hparams['timesteps'],
125
+ K_step=hparams['K_step'],
126
+ loss_type=hparams['diff_loss_type'],
127
+ spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
128
+ )
129
+ self.load_ckpt()
130
+ self.model.cuda()
131
+ hparams['hubert_gpu'] = hubert_gpu
132
+ self.hubert = Hubertencoder(hparams['hubert_path'])
133
+ self.pe = PitchExtractor().cuda()
134
+ utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
135
+ self.pe.eval()
136
+ self.vocoder = get_vocoder_cls(hparams)()
137
+
138
+ def load_ckpt(self, model_name='model', force=True, strict=True):
139
+ utils.load_ckpt(self.model, self.model_path, model_name, force, strict)
140
+
141
+ def infer(self, in_path, key, acc, use_pe=True, use_crepe=True, thre=0.05, singer=False, **kwargs):
142
+ batch = self.pre(in_path, acc, use_crepe, thre)
143
+ spk_embed = batch.get('spk_embed') if not hparams['use_spk_id'] else batch.get('spk_ids')
144
+ hubert = batch['hubert']
145
+ ref_mels = batch["mels"]
146
+ energy=batch['energy']
147
+ mel2ph = batch['mel2ph']
148
+ batch['f0'] = batch['f0'] + (key / 12)
149
+ batch['f0'][batch['f0']>np.log2(hparams['f0_max'])]=0
150
+ f0 = batch['f0']
151
+ uv = batch['uv']
152
+ @timeit
153
+ def diff_infer():
154
+ outputs = self.model(
155
+ hubert.cuda(), spk_embed=spk_embed, mel2ph=mel2ph.cuda(), f0=f0.cuda(), uv=uv.cuda(),energy=energy.cuda(),
156
+ ref_mels=ref_mels.cuda(),
157
+ infer=True, **kwargs)
158
+ return outputs
159
+ outputs=diff_infer()
160
+ batch['outputs'] = self.model.out2mel(outputs['mel_out'])
161
+ batch['mel2ph_pred'] = outputs['mel2ph']
162
+ batch['f0_gt'] = denorm_f0(batch['f0'], batch['uv'], hparams)
163
+ if use_pe:
164
+ batch['f0_pred'] = self.pe(outputs['mel_out'])['f0_denorm_pred'].detach()
165
+ else:
166
+ batch['f0_pred'] = outputs.get('f0_denorm')
167
+ return self.after_infer(batch, singer, in_path)
168
+
169
+ @timeit
170
+ def after_infer(self, prediction, singer, in_path):
171
+ for k, v in prediction.items():
172
+ if type(v) is torch.Tensor:
173
+ prediction[k] = v.cpu().numpy()
174
+
175
+ # remove paddings
176
+ mel_gt = prediction["mels"]
177
+ mel_gt_mask = np.abs(mel_gt).sum(-1) > 0
178
+
179
+ mel_pred = prediction["outputs"]
180
+ mel_pred_mask = np.abs(mel_pred).sum(-1) > 0
181
+ mel_pred = mel_pred[mel_pred_mask]
182
+ mel_pred = np.clip(mel_pred, hparams['mel_vmin'], hparams['mel_vmax'])
183
+
184
+ f0_gt = prediction.get("f0_gt")
185
+ f0_pred = prediction.get("f0_pred")
186
+ if f0_pred is not None:
187
+ f0_gt = f0_gt[mel_gt_mask]
188
+ if len(f0_pred) > len(mel_pred_mask):
189
+ f0_pred = f0_pred[:len(mel_pred_mask)]
190
+ f0_pred = f0_pred[mel_pred_mask]
191
+ torch.cuda.is_available() and torch.cuda.empty_cache()
192
+
193
+ if singer:
194
+ data_path = in_path.replace("batch", "singer_data")
195
+ mel_path = data_path[:-4] + "_mel.npy"
196
+ f0_path = data_path[:-4] + "_f0.npy"
197
+ np.save(mel_path, mel_pred)
198
+ np.save(f0_path, f0_pred)
199
+ wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred)
200
+ return f0_gt, f0_pred, wav_pred
201
+
202
+ def temporary_dict2processed_input(self, item_name, temp_dict, use_crepe=True, thre=0.05):
203
+ '''
204
+ process data in temporary_dicts
205
+ '''
206
+
207
+ binarization_args = hparams['binarization_args']
208
+
209
+ @timeit
210
+ def get_pitch(wav, mel):
211
+ # get ground truth f0 by self.get_pitch_algorithm
212
+ global f0_dict
213
+ if use_crepe:
214
+ md5 = get_md5(wav)
215
+ if f"{md5}_gt" in f0_dict.keys():
216
+ print("load temp crepe f0")
217
+ gt_f0 = np.array(f0_dict[f"{md5}_gt"]["f0"])
218
+ coarse_f0 = np.array(f0_dict[f"{md5}_coarse"]["f0"])
219
+ else:
220
+ torch.cuda.is_available() and torch.cuda.empty_cache()
221
+ gt_f0, coarse_f0 = get_pitch_crepe(wav, mel, hparams, thre)
222
+ f0_dict[f"{md5}_gt"] = {"f0": gt_f0.tolist(), "time": int(time.time())}
223
+ f0_dict[f"{md5}_coarse"] = {"f0": coarse_f0.tolist(), "time": int(time.time())}
224
+ write_temp("./infer_tools/f0_temp.json", f0_dict)
225
+ else:
226
+ md5 = get_md5(wav)
227
+ if f"{md5}_gt_harvest" in f0_dict.keys():
228
+ print("load temp harvest f0")
229
+ gt_f0 = np.array(f0_dict[f"{md5}_gt_harvest"]["f0"])
230
+ coarse_f0 = np.array(f0_dict[f"{md5}_coarse_harvest"]["f0"])
231
+ else:
232
+ gt_f0, coarse_f0 = get_pitch_world(wav, mel, hparams)
233
+ f0_dict[f"{md5}_gt_harvest"] = {"f0": gt_f0.tolist(), "time": int(time.time())}
234
+ f0_dict[f"{md5}_coarse_harvest"] = {"f0": coarse_f0.tolist(), "time": int(time.time())}
235
+ write_temp("./infer_tools/f0_temp.json", f0_dict)
236
+ processed_input['f0'] = gt_f0
237
+ processed_input['pitch'] = coarse_f0
238
+
239
+ def get_align(mel, phone_encoded):
240
+ mel2ph = np.zeros([mel.shape[0]], int)
241
+ start_frame = 0
242
+ ph_durs = mel.shape[0] / phone_encoded.shape[0]
243
+ if hparams['debug']:
244
+ print(mel.shape, phone_encoded.shape, mel.shape[0] / phone_encoded.shape[0])
245
+ for i_ph in range(phone_encoded.shape[0]):
246
+ end_frame = int(i_ph * ph_durs + ph_durs + 0.5)
247
+ mel2ph[start_frame:end_frame + 1] = i_ph + 1
248
+ start_frame = end_frame + 1
249
+
250
+ processed_input['mel2ph'] = mel2ph
251
+
252
+ if hparams['vocoder'] in VOCODERS:
253
+ wav, mel = VOCODERS[hparams['vocoder']].wav2spec(temp_dict['wav_fn'])
254
+ else:
255
+ wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(temp_dict['wav_fn'])
256
+ processed_input = {
257
+ 'item_name': item_name, 'mel': mel,
258
+ 'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]
259
+ }
260
+ processed_input = {**temp_dict, **processed_input} # merge two dicts
261
+
262
+ if binarization_args['with_f0']:
263
+ get_pitch(wav, mel)
264
+ if binarization_args['with_hubert']:
265
+ st = time.time()
266
+ hubert_encoded = processed_input['hubert'] = self.hubert.encode(temp_dict['wav_fn'])
267
+ et = time.time()
268
+ dev = 'cuda' if hparams['hubert_gpu'] and torch.cuda.is_available() else 'cpu'
269
+ print(f'hubert (on {dev}) time used {et - st}')
270
+
271
+ if binarization_args['with_align']:
272
+ get_align(mel, hubert_encoded)
273
+ return processed_input
274
+
275
+ def pre(self, wav_fn, accelerate, use_crepe=True, thre=0.05):
276
+ if isinstance(wav_fn, BytesIO):
277
+ item_name = self.project_name
278
+ else:
279
+ song_info = wav_fn.split('/')
280
+ item_name = song_info[-1].split('.')[-2]
281
+ temp_dict = {'wav_fn': wav_fn, 'spk_id': self.project_name}
282
+
283
+ temp_dict = self.temporary_dict2processed_input(item_name, temp_dict, use_crepe, thre)
284
+ hparams['pndm_speedup'] = accelerate
285
+ batch = processed_input2batch([getitem(temp_dict)])
286
+ return batch
287
+
288
+
289
+ def getitem(item):
290
+ max_frames = hparams['max_frames']
291
+ spec = torch.Tensor(item['mel'])[:max_frames]
292
+ energy = (spec.exp() ** 2).sum(-1).sqrt()
293
+ mel2ph = torch.LongTensor(item['mel2ph'])[:max_frames] if 'mel2ph' in item else None
294
+ f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams)
295
+ hubert = torch.Tensor(item['hubert'][:hparams['max_input_tokens']])
296
+ pitch = torch.LongTensor(item.get("pitch"))[:max_frames]
297
+ sample = {
298
+ "item_name": item['item_name'],
299
+ "hubert": hubert,
300
+ "mel": spec,
301
+ "pitch": pitch,
302
+ "energy": energy,
303
+ "f0": f0,
304
+ "uv": uv,
305
+ "mel2ph": mel2ph,
306
+ "mel_nonpadding": spec.abs().sum(-1) > 0,
307
+ }
308
+ return sample
309
+
310
+
311
+ def processed_input2batch(samples):
312
+ '''
313
+ Args:
314
+ samples: one batch of processed_input
315
+ NOTE:
316
+ the batch size is controlled by hparams['max_sentences']
317
+ '''
318
+ if len(samples) == 0:
319
+ return {}
320
+ item_names = [s['item_name'] for s in samples]
321
+ hubert = utils.collate_2d([s['hubert'] for s in samples], 0.0)
322
+ f0 = utils.collate_1d([s['f0'] for s in samples], 0.0)
323
+ pitch = utils.collate_1d([s['pitch'] for s in samples])
324
+ uv = utils.collate_1d([s['uv'] for s in samples])
325
+ energy = utils.collate_1d([s['energy'] for s in samples], 0.0)
326
+ mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \
327
+ if samples[0]['mel2ph'] is not None else None
328
+ mels = utils.collate_2d([s['mel'] for s in samples], 0.0)
329
+ mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])
330
+
331
+ batch = {
332
+ 'item_name': item_names,
333
+ 'nsamples': len(samples),
334
+ 'hubert': hubert,
335
+ 'mels': mels,
336
+ 'mel_lengths': mel_lengths,
337
+ 'mel2ph': mel2ph,
338
+ 'energy': energy,
339
+ 'pitch': pitch,
340
+ 'f0': f0,
341
+ 'uv': uv,
342
+ }
343
+ return batch
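Putting the pieces of infer_tool.py together, a minimal programmatic use of Svc looks like the sketch below; the project name and paths are placeholders, and a CUDA device is required since the class moves everything to the GPU. Note that infer() takes key in semitones and adds key/12 to the log2-scale F0, i.e. it multiplies the frequency by 2**(key/12):

```python
import soundfile

from infer_tools.infer_tool import Svc
from utils.hparams import hparams

# Placeholders: use your own project name, config and ckpt paths.
model = Svc("nyaru", "./checkpoints/nyaru/config.yaml", hubert_gpu=True,
            model_path="./checkpoints/nyaru/model_ckpt_steps_112000.ckpt")

# key: transpose in semitones (log2 F0 + key/12), acc: pndm speed-up factor.
f0_gt, f0_pred, wav = model.infer("./raw/test_input.wav", key=0, acc=20,
                                  use_pe=True, use_crepe=True)

soundfile.write("./results/test_output.wav", wav, hparams["audio_sample_rate"])
```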
infer_tools/slicer.py ADDED
@@ -0,0 +1,158 @@
 
 
1
+ import time
2
+
3
+ import numpy as np
4
+ import torch
5
+ import torchaudio
6
+ from scipy.ndimage import maximum_filter1d, uniform_filter1d
7
+
8
+
9
+ def timeit(func):
10
+ def run(*args, **kwargs):
11
+ t = time.time()
12
+ res = func(*args, **kwargs)
13
+ print('executing \'%s\' took %.3fs' % (func.__name__, time.time() - t))
14
+ return res
15
+
16
+ return run
17
+
18
+
19
+ # @timeit
20
+ def _window_maximum(arr, win_sz):
21
+ return maximum_filter1d(arr, size=win_sz)[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1]
22
+
23
+
24
+ # @timeit
25
+ def _window_rms(arr, win_sz):
26
+ filtered = np.sqrt(uniform_filter1d(np.power(arr, 2), win_sz) - np.power(uniform_filter1d(arr, win_sz), 2))
27
+ return filtered[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1]
28
+
29
+
30
+ def level2db(levels, eps=1e-12):
31
+ return 20 * np.log10(np.clip(levels, a_min=eps, a_max=1))
32
+
33
+
34
+ def _apply_slice(audio, begin, end):
35
+ if len(audio.shape) > 1:
36
+ return audio[:, begin: end]
37
+ else:
38
+ return audio[begin: end]
39
+
40
+
41
+ class Slicer:
42
+ def __init__(self,
43
+ sr: int,
44
+ db_threshold: float = -40,
45
+ min_length: int = 5000,
46
+ win_l: int = 300,
47
+ win_s: int = 20,
48
+ max_silence_kept: int = 500):
49
+ self.db_threshold = db_threshold
50
+ self.min_samples = round(sr * min_length / 1000)
51
+ self.win_ln = round(sr * win_l / 1000)
52
+ self.win_sn = round(sr * win_s / 1000)
53
+ self.max_silence = round(sr * max_silence_kept / 1000)
54
+ if not self.min_samples >= self.win_ln >= self.win_sn:
55
+ raise ValueError('The following condition must be satisfied: min_length >= win_l >= win_s')
56
+ if not self.max_silence >= self.win_sn:
57
+ raise ValueError('The following condition must be satisfied: max_silence_kept >= win_s')
58
+
59
+ @timeit
60
+ def slice(self, audio):
61
+ samples = audio
62
+ if samples.shape[0] <= self.min_samples:
63
+ return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}}
64
+ # get absolute amplitudes
65
+ abs_amp = np.abs(samples - np.mean(samples))
66
+ # calculate local maximum with large window
67
+ win_max_db = level2db(_window_maximum(abs_amp, win_sz=self.win_ln))
68
+ sil_tags = []
69
+ left = right = 0
70
+ while right < win_max_db.shape[0]:
71
+ if win_max_db[right] < self.db_threshold:
72
+ right += 1
73
+ elif left == right:
74
+ left += 1
75
+ right += 1
76
+ else:
77
+ if left == 0:
78
+ split_loc_l = left
79
+ else:
80
+ sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2)
81
+ rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn))
82
+ split_win_l = left + np.argmin(rms_db_left)
83
+ split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn])
84
+ if len(sil_tags) != 0 and split_loc_l - sil_tags[-1][1] < self.min_samples and right < win_max_db.shape[
85
+ 0] - 1:
86
+ right += 1
87
+ left = right
88
+ continue
89
+ if right == win_max_db.shape[0] - 1:
90
+ split_loc_r = right + self.win_ln
91
+ else:
92
+ sil_right_n = min(self.max_silence, (right + self.win_ln - left) // 2)
93
+ rms_db_right = level2db(_window_rms(samples[right + self.win_ln - sil_right_n: right + self.win_ln],
94
+ win_sz=self.win_sn))
95
+ split_win_r = right + self.win_ln - sil_right_n + np.argmin(rms_db_right)
96
+ split_loc_r = split_win_r + np.argmin(abs_amp[split_win_r: split_win_r + self.win_sn])
97
+ sil_tags.append((split_loc_l, split_loc_r))
98
+ right += 1
99
+ left = right
100
+ if left != right:
101
+ sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2)
102
+ rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn))
103
+ split_win_l = left + np.argmin(rms_db_left)
104
+ split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn])
105
+ sil_tags.append((split_loc_l, samples.shape[0]))
106
+ if len(sil_tags) == 0:
107
+ return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}}
108
+ else:
109
+ chunks = []
110
+ # the first silent section does not start at the beginning; add the leading voiced segment
111
+ if sil_tags[0][0]:
112
+ chunks.append({"slice": False, "split_time": f"0,{sil_tags[0][0]}"})
113
+ for i in range(0, len(sil_tags)):
114
+ # mark the voiced segment (skipped for the first iteration)
115
+ if i:
116
+ chunks.append({"slice": False, "split_time": f"{sil_tags[i - 1][1]},{sil_tags[i][0]}"})
117
+ # mark every silent segment
118
+ chunks.append({"slice": True, "split_time": f"{sil_tags[i][0]},{sil_tags[i][1]}"})
119
+ # the last silent section does not reach the end; add the trailing segment
120
+ if sil_tags[-1][1] != len(audio):
121
+ chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1]},{len(audio)}"})
122
+ chunk_dict = {}
123
+ for i in range(len(chunks)):
124
+ chunk_dict[str(i)] = chunks[i]
125
+ return chunk_dict
126
+
127
+
128
+ def cut(audio_path, db_thresh=-30, min_len=5000, win_l=300, win_s=20, max_sil_kept=500):
129
+ audio, sr = torchaudio.load(audio_path)
130
+ if len(audio.shape) == 2 and audio.shape[1] >= 2:
131
+ audio = torch.mean(audio, dim=0).unsqueeze(0)
132
+ audio = audio.cpu().numpy()[0]
133
+
134
+ slicer = Slicer(
135
+ sr=sr,
136
+ db_threshold=db_thresh,
137
+ min_length=min_len,
138
+ win_l=win_l,
139
+ win_s=win_s,
140
+ max_silence_kept=max_sil_kept
141
+ )
142
+ chunks = slicer.slice(audio)
143
+ return chunks
144
+
145
+
146
+ def chunks2audio(audio_path, chunks):
147
+ chunks = dict(chunks)
148
+ audio, sr = torchaudio.load(audio_path)
149
+ if len(audio.shape) == 2 and audio.shape[1] >= 2:
150
+ audio = torch.mean(audio, dim=0).unsqueeze(0)
151
+ audio = audio.cpu().numpy()[0]
152
+ result = []
153
+ for k, v in chunks.items():
154
+ tag = v["split_time"].split(",")
155
+ result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
156
+ return result, sr
157
+
158
+
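slicer.py can also be used on its own to split a long recording at silent sections. A small sketch mirroring how infer.py consumes it (the input path is a placeholder):

```python
import soundfile

from infer_tools import slicer

# cut() returns a dict of {"slice": bool, "split_time": "start,end"} chunks.
chunks = slicer.cut("long_recording.wav", db_thresh=-40)
segments, sr = slicer.chunks2audio("long_recording.wav", chunks)

# Each entry is (is_silence, samples); keep only the voiced parts.
for i, (is_silence, data) in enumerate(segments):
    if not is_silence:
        soundfile.write(f"segment_{i:03d}.wav", data, sr)
```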
inference.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
modules/commons/__pycache__/common_layers.cpython-38.pyc ADDED
Binary file (18.8 kB). View file
 
modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc ADDED
Binary file (4.44 kB). View file
 
modules/commons/__pycache__/ssim.cpython-38.pyc ADDED
Binary file (2.69 kB). View file
 
modules/commons/common_layers.py ADDED
@@ -0,0 +1,671 @@
 
 
 
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import Parameter
5
+ import torch.onnx.operators
6
+ import torch.nn.functional as F
7
+ import utils
8
+
9
+
10
+ class Reshape(nn.Module):
11
+ def __init__(self, *args):
12
+ super(Reshape, self).__init__()
13
+ self.shape = args
14
+
15
+ def forward(self, x):
16
+ return x.view(self.shape)
17
+
18
+
19
+ class Permute(nn.Module):
20
+ def __init__(self, *args):
21
+ super(Permute, self).__init__()
22
+ self.args = args
23
+
24
+ def forward(self, x):
25
+ return x.permute(self.args)
26
+
27
+
28
+ class LinearNorm(torch.nn.Module):
29
+ def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
30
+ super(LinearNorm, self).__init__()
31
+ self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
32
+
33
+ torch.nn.init.xavier_uniform_(
34
+ self.linear_layer.weight,
35
+ gain=torch.nn.init.calculate_gain(w_init_gain))
36
+
37
+ def forward(self, x):
38
+ return self.linear_layer(x)
39
+
40
+
41
+ class ConvNorm(torch.nn.Module):
42
+ def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
43
+ padding=None, dilation=1, bias=True, w_init_gain='linear'):
44
+ super(ConvNorm, self).__init__()
45
+ if padding is None:
46
+ assert (kernel_size % 2 == 1)
47
+ padding = int(dilation * (kernel_size - 1) / 2)
48
+
49
+ self.conv = torch.nn.Conv1d(in_channels, out_channels,
50
+ kernel_size=kernel_size, stride=stride,
51
+ padding=padding, dilation=dilation,
52
+ bias=bias)
53
+
54
+ torch.nn.init.xavier_uniform_(
55
+ self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
56
+
57
+ def forward(self, signal):
58
+ conv_signal = self.conv(signal)
59
+ return conv_signal
60
+
61
+
62
+ def Embedding(num_embeddings, embedding_dim, padding_idx=None):
63
+ m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
64
+ nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
65
+ if padding_idx is not None:
66
+ nn.init.constant_(m.weight[padding_idx], 0)
67
+ return m
68
+
69
+
70
+ def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
71
+ if not export and torch.cuda.is_available():
72
+ try:
73
+ from apex.normalization import FusedLayerNorm
74
+ return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
75
+ except ImportError:
76
+ pass
77
+ return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
78
+
79
+
80
+ def Linear(in_features, out_features, bias=True):
81
+ m = nn.Linear(in_features, out_features, bias)
82
+ nn.init.xavier_uniform_(m.weight)
83
+ if bias:
84
+ nn.init.constant_(m.bias, 0.)
85
+ return m
86
+
87
+
88
+ class SinusoidalPositionalEmbedding(nn.Module):
89
+ """This module produces sinusoidal positional embeddings of any length.
90
+
91
+ Padding symbols are ignored.
92
+ """
93
+
94
+ def __init__(self, embedding_dim, padding_idx, init_size=1024):
95
+ super().__init__()
96
+ self.embedding_dim = embedding_dim
97
+ self.padding_idx = padding_idx
98
+ self.weights = SinusoidalPositionalEmbedding.get_embedding(
99
+ init_size,
100
+ embedding_dim,
101
+ padding_idx,
102
+ )
103
+ self.register_buffer('_float_tensor', torch.FloatTensor(1))
104
+
105
+ @staticmethod
106
+ def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
107
+ """Build sinusoidal embeddings.
108
+
109
+ This matches the implementation in tensor2tensor, but differs slightly
110
+ from the description in Section 3.5 of "Attention Is All You Need".
111
+ """
112
+ half_dim = embedding_dim // 2
113
+ emb = math.log(10000) / (half_dim - 1)
114
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
115
+ emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
116
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
117
+ if embedding_dim % 2 == 1:
118
+ # zero pad
119
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
120
+ if padding_idx is not None:
121
+ emb[padding_idx, :] = 0
122
+ return emb
123
+
124
+ def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
125
+ """Input is expected to be of size [bsz x seqlen]."""
126
+ bsz, seq_len = input.shape[:2]
127
+ max_pos = self.padding_idx + 1 + seq_len
128
+ if self.weights is None or max_pos > self.weights.size(0):
129
+ # recompute/expand embeddings if needed
130
+ self.weights = SinusoidalPositionalEmbedding.get_embedding(
131
+ max_pos,
132
+ self.embedding_dim,
133
+ self.padding_idx,
134
+ )
135
+ self.weights = self.weights.to(self._float_tensor)
136
+
137
+ if incremental_state is not None:
138
+ # positions is the same for every token when decoding a single step
139
+ pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
140
+ return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
141
+
142
+ positions = utils.make_positions(input, self.padding_idx) if positions is None else positions
143
+ return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
144
+
145
+ def max_positions(self):
146
+ """Maximum number of supported positions."""
147
+ return int(1e5) # an arbitrary large number
148
+
149
+
150
+ class ConvTBC(nn.Module):
151
+ def __init__(self, in_channels, out_channels, kernel_size, padding=0):
152
+ super(ConvTBC, self).__init__()
153
+ self.in_channels = in_channels
154
+ self.out_channels = out_channels
155
+ self.kernel_size = kernel_size
156
+ self.padding = padding
157
+
158
+ self.weight = torch.nn.Parameter(torch.Tensor(
159
+ self.kernel_size, in_channels, out_channels))
160
+ self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
161
+
162
+ def forward(self, input):
163
+ return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding)
164
+
165
+
166
+ class MultiheadAttention(nn.Module):
167
+ def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
168
+ add_bias_kv=False, add_zero_attn=False, self_attention=False,
169
+ encoder_decoder_attention=False):
170
+ super().__init__()
171
+ self.embed_dim = embed_dim
172
+ self.kdim = kdim if kdim is not None else embed_dim
173
+ self.vdim = vdim if vdim is not None else embed_dim
174
+ self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
175
+
176
+ self.num_heads = num_heads
177
+ self.dropout = dropout
178
+ self.head_dim = embed_dim // num_heads
179
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
180
+ self.scaling = self.head_dim ** -0.5
181
+
182
+ self.self_attention = self_attention
183
+ self.encoder_decoder_attention = encoder_decoder_attention
184
+
185
+ assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
186
+ 'value to be of the same size'
187
+
188
+ if self.qkv_same_dim:
189
+ self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
190
+ else:
191
+ self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
192
+ self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
193
+ self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
194
+
195
+ if bias:
196
+ self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
197
+ else:
198
+ self.register_parameter('in_proj_bias', None)
199
+
200
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
201
+
202
+ if add_bias_kv:
203
+ self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
204
+ self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
205
+ else:
206
+ self.bias_k = self.bias_v = None
207
+
208
+ self.add_zero_attn = add_zero_attn
209
+
210
+ self.reset_parameters()
211
+
212
+ self.enable_torch_version = False
213
+ if hasattr(F, "multi_head_attention_forward"):
214
+ self.enable_torch_version = True
215
+ else:
216
+ self.enable_torch_version = False
217
+ self.last_attn_probs = None
218
+
219
+ def reset_parameters(self):
220
+ if self.qkv_same_dim:
221
+ nn.init.xavier_uniform_(self.in_proj_weight)
222
+ else:
223
+ nn.init.xavier_uniform_(self.k_proj_weight)
224
+ nn.init.xavier_uniform_(self.v_proj_weight)
225
+ nn.init.xavier_uniform_(self.q_proj_weight)
226
+
227
+ nn.init.xavier_uniform_(self.out_proj.weight)
228
+ if self.in_proj_bias is not None:
229
+ nn.init.constant_(self.in_proj_bias, 0.)
230
+ nn.init.constant_(self.out_proj.bias, 0.)
231
+ if self.bias_k is not None:
232
+ nn.init.xavier_normal_(self.bias_k)
233
+ if self.bias_v is not None:
234
+ nn.init.xavier_normal_(self.bias_v)
235
+
236
+ def forward(
237
+ self,
238
+ query, key, value,
239
+ key_padding_mask=None,
240
+ incremental_state=None,
241
+ need_weights=True,
242
+ static_kv=False,
243
+ attn_mask=None,
244
+ before_softmax=False,
245
+ need_head_weights=False,
246
+ enc_dec_attn_constraint_mask=None,
247
+ reset_attn_weight=None
248
+ ):
249
+ """Input shape: Time x Batch x Channel
250
+
251
+ Args:
252
+ key_padding_mask (ByteTensor, optional): mask to exclude
253
+ keys that are pads, of shape `(batch, src_len)`, where
254
+ padding elements are indicated by 1s.
255
+ need_weights (bool, optional): return the attention weights,
256
+ averaged over heads (default: False).
257
+ attn_mask (ByteTensor, optional): typically used to
258
+ implement causal attention, where the mask prevents the
259
+ attention from looking forward in time (default: None).
260
+ before_softmax (bool, optional): return the raw attention
261
+ weights and values before the attention softmax.
262
+ need_head_weights (bool, optional): return the attention
263
+ weights for each head. Implies *need_weights*. Default:
264
+ return the average attention weights over all heads.
265
+ """
266
+ if need_head_weights:
267
+ need_weights = True
268
+
269
+ tgt_len, bsz, embed_dim = query.size()
270
+ assert embed_dim == self.embed_dim
271
+ assert list(query.size()) == [tgt_len, bsz, embed_dim]
272
+
273
+ if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
274
+ if self.qkv_same_dim:
275
+ return F.multi_head_attention_forward(query, key, value,
276
+ self.embed_dim, self.num_heads,
277
+ self.in_proj_weight,
278
+ self.in_proj_bias, self.bias_k, self.bias_v,
279
+ self.add_zero_attn, self.dropout,
280
+ self.out_proj.weight, self.out_proj.bias,
281
+ self.training, key_padding_mask, need_weights,
282
+ attn_mask)
283
+ else:
284
+ return F.multi_head_attention_forward(query, key, value,
285
+ self.embed_dim, self.num_heads,
286
+ torch.empty([0]),
287
+ self.in_proj_bias, self.bias_k, self.bias_v,
288
+ self.add_zero_attn, self.dropout,
289
+ self.out_proj.weight, self.out_proj.bias,
290
+ self.training, key_padding_mask, need_weights,
291
+ attn_mask, use_separate_proj_weight=True,
292
+ q_proj_weight=self.q_proj_weight,
293
+ k_proj_weight=self.k_proj_weight,
294
+ v_proj_weight=self.v_proj_weight)
295
+
296
+ if incremental_state is not None:
297
+ print('Not implemented error.')
298
+ exit()
299
+ else:
300
+ saved_state = None
301
+
302
+ if self.self_attention:
303
+ # self-attention
304
+ q, k, v = self.in_proj_qkv(query)
305
+ elif self.encoder_decoder_attention:
306
+ # encoder-decoder attention
307
+ q = self.in_proj_q(query)
308
+ if key is None:
309
+ assert value is None
310
+ k = v = None
311
+ else:
312
+ k = self.in_proj_k(key)
313
+ v = self.in_proj_v(key)
314
+
315
+ else:
316
+ q = self.in_proj_q(query)
317
+ k = self.in_proj_k(key)
318
+ v = self.in_proj_v(value)
319
+ q *= self.scaling
320
+
321
+ if self.bias_k is not None:
322
+ assert self.bias_v is not None
323
+ k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
324
+ v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
325
+ if attn_mask is not None:
326
+ attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
327
+ if key_padding_mask is not None:
328
+ key_padding_mask = torch.cat(
329
+ [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
330
+
331
+ q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
332
+ if k is not None:
333
+ k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
334
+ if v is not None:
335
+ v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
336
+
337
+ if saved_state is not None:
338
+ print('Not implemented error.')
339
+ exit()
340
+
341
+ src_len = k.size(1)
342
+
343
+ # This is part of a workaround to get around fork/join parallelism
344
+ # not supporting Optional types.
345
+ if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
346
+ key_padding_mask = None
347
+
348
+ if key_padding_mask is not None:
349
+ assert key_padding_mask.size(0) == bsz
350
+ assert key_padding_mask.size(1) == src_len
351
+
352
+ if self.add_zero_attn:
353
+ src_len += 1
354
+ k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
355
+ v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
356
+ if attn_mask is not None:
357
+ attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
358
+ if key_padding_mask is not None:
359
+ key_padding_mask = torch.cat(
360
+ [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
361
+
362
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
363
+ attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
364
+
365
+ assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
366
+
367
+ if attn_mask is not None:
368
+ if len(attn_mask.shape) == 2:
369
+ attn_mask = attn_mask.unsqueeze(0)
370
+ elif len(attn_mask.shape) == 3:
371
+ attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
372
+ bsz * self.num_heads, tgt_len, src_len)
373
+ attn_weights = attn_weights + attn_mask
374
+
375
+ if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv
376
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
377
+ attn_weights = attn_weights.masked_fill(
378
+ enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
379
+ -1e9,
380
+ )
381
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
382
+
383
+ if key_padding_mask is not None:
384
+ # don't attend to padding symbols
385
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
386
+ attn_weights = attn_weights.masked_fill(
387
+ key_padding_mask.unsqueeze(1).unsqueeze(2),
388
+ -1e9,
389
+ )
390
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
391
+
392
+ attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
393
+
394
+ if before_softmax:
395
+ return attn_weights, v
396
+
397
+ attn_weights_float = utils.softmax(attn_weights, dim=-1)
398
+ attn_weights = attn_weights_float.type_as(attn_weights)
399
+ attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
400
+
401
+ if reset_attn_weight is not None:
402
+ if reset_attn_weight:
403
+ self.last_attn_probs = attn_probs.detach()
404
+ else:
405
+ assert self.last_attn_probs is not None
406
+ attn_probs = self.last_attn_probs
407
+ attn = torch.bmm(attn_probs, v)
408
+ assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
409
+ attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
410
+ attn = self.out_proj(attn)
411
+
412
+ if need_weights:
413
+ attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
414
+ if not need_head_weights:
415
+ # average attention weights over heads
416
+ attn_weights = attn_weights.mean(dim=0)
417
+ else:
418
+ attn_weights = None
419
+
420
+ return attn, (attn_weights, attn_logits)
421
+
422
+ def in_proj_qkv(self, query):
423
+ return self._in_proj(query).chunk(3, dim=-1)
424
+
425
+ def in_proj_q(self, query):
426
+ if self.qkv_same_dim:
427
+ return self._in_proj(query, end=self.embed_dim)
428
+ else:
429
+ bias = self.in_proj_bias
430
+ if bias is not None:
431
+ bias = bias[:self.embed_dim]
432
+ return F.linear(query, self.q_proj_weight, bias)
433
+
434
+ def in_proj_k(self, key):
435
+ if self.qkv_same_dim:
436
+ return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
437
+ else:
438
+ weight = self.k_proj_weight
439
+ bias = self.in_proj_bias
440
+ if bias is not None:
441
+ bias = bias[self.embed_dim:2 * self.embed_dim]
442
+ return F.linear(key, weight, bias)
443
+
444
+ def in_proj_v(self, value):
445
+ if self.qkv_same_dim:
446
+ return self._in_proj(value, start=2 * self.embed_dim)
447
+ else:
448
+ weight = self.v_proj_weight
449
+ bias = self.in_proj_bias
450
+ if bias is not None:
451
+ bias = bias[2 * self.embed_dim:]
452
+ return F.linear(value, weight, bias)
453
+
454
+ def _in_proj(self, input, start=0, end=None):
455
+ weight = self.in_proj_weight
456
+ bias = self.in_proj_bias
457
+ weight = weight[start:end, :]
458
+ if bias is not None:
459
+ bias = bias[start:end]
460
+ return F.linear(input, weight, bias)
461
+
462
+
463
+ def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
464
+ return attn_weights
465
+
466
+
467
+ class Swish(torch.autograd.Function):
468
+ @staticmethod
469
+ def forward(ctx, i):
470
+ result = i * torch.sigmoid(i)
471
+ ctx.save_for_backward(i)
472
+ return result
473
+
474
+ @staticmethod
475
+ def backward(ctx, grad_output):
476
+ i = ctx.saved_variables[0]
477
+ sigmoid_i = torch.sigmoid(i)
478
+ return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
479
+
480
+
481
+ class CustomSwish(nn.Module):
482
+ def forward(self, input_tensor):
483
+ return Swish.apply(input_tensor)
484
+
485
+ class Mish(nn.Module):
486
+ def forward(self, x):
487
+ return x * torch.tanh(F.softplus(x))
488
+
489
+ class TransformerFFNLayer(nn.Module):
490
+ def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
491
+ super().__init__()
492
+ self.kernel_size = kernel_size
493
+ self.dropout = dropout
494
+ self.act = act
495
+ if padding == 'SAME':
496
+ self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
497
+ elif padding == 'LEFT':
498
+ self.ffn_1 = nn.Sequential(
499
+ nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
500
+ nn.Conv1d(hidden_size, filter_size, kernel_size)
501
+ )
502
+ self.ffn_2 = Linear(filter_size, hidden_size)
503
+ if self.act == 'swish':
504
+ self.swish_fn = CustomSwish()
505
+
506
+ def forward(self, x, incremental_state=None):
507
+ # x: T x B x C
508
+ if incremental_state is not None:
509
+ assert incremental_state is None, 'Nar-generation does not allow this.'
510
+ exit(1)
511
+
512
+ x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
513
+ x = x * self.kernel_size ** -0.5
514
+
515
+ if incremental_state is not None:
516
+ x = x[-1:]
517
+ if self.act == 'gelu':
518
+ x = F.gelu(x)
519
+ if self.act == 'relu':
520
+ x = F.relu(x)
521
+ if self.act == 'swish':
522
+ x = self.swish_fn(x)
523
+ x = F.dropout(x, self.dropout, training=self.training)
524
+ x = self.ffn_2(x)
525
+ return x
526
+
527
+
528
+ class BatchNorm1dTBC(nn.Module):
529
+ def __init__(self, c):
530
+ super(BatchNorm1dTBC, self).__init__()
531
+ self.bn = nn.BatchNorm1d(c)
532
+
533
+ def forward(self, x):
534
+ """
535
+
536
+ :param x: [T, B, C]
537
+ :return: [T, B, C]
538
+ """
539
+ x = x.permute(1, 2, 0) # [B, C, T]
540
+ x = self.bn(x) # [B, C, T]
541
+ x = x.permute(2, 0, 1) # [T, B, C]
542
+ return x
543
+
544
+
545
+ class EncSALayer(nn.Module):
546
+ def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
547
+ relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'):
548
+ super().__init__()
549
+ self.c = c
550
+ self.dropout = dropout
551
+ self.num_heads = num_heads
552
+ if num_heads > 0:
553
+ if norm == 'ln':
554
+ self.layer_norm1 = LayerNorm(c)
555
+ elif norm == 'bn':
556
+ self.layer_norm1 = BatchNorm1dTBC(c)
557
+ self.self_attn = MultiheadAttention(
558
+ self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
559
+ )
560
+ if norm == 'ln':
561
+ self.layer_norm2 = LayerNorm(c)
562
+ elif norm == 'bn':
563
+ self.layer_norm2 = BatchNorm1dTBC(c)
564
+ self.ffn = TransformerFFNLayer(
565
+ c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
566
+
567
+ def forward(self, x, encoder_padding_mask=None, **kwargs):
568
+ layer_norm_training = kwargs.get('layer_norm_training', None)
569
+ if layer_norm_training is not None:
570
+ self.layer_norm1.training = layer_norm_training
571
+ self.layer_norm2.training = layer_norm_training
572
+ if self.num_heads > 0:
573
+ residual = x
574
+ x = self.layer_norm1(x)
575
+ x, _, = self.self_attn(
576
+ query=x,
577
+ key=x,
578
+ value=x,
579
+ key_padding_mask=encoder_padding_mask
580
+ )
581
+ x = F.dropout(x, self.dropout, training=self.training)
582
+ x = residual + x
583
+ x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
584
+
585
+ residual = x
586
+ x = self.layer_norm2(x)
587
+ x = self.ffn(x)
588
+ x = F.dropout(x, self.dropout, training=self.training)
589
+ x = residual + x
590
+ x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
591
+ return x
592
+
593
+
594
+ class DecSALayer(nn.Module):
595
+ def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9, act='gelu'):
596
+ super().__init__()
597
+ self.c = c
598
+ self.dropout = dropout
599
+ self.layer_norm1 = LayerNorm(c)
600
+ self.self_attn = MultiheadAttention(
601
+ c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
602
+ )
603
+ self.layer_norm2 = LayerNorm(c)
604
+ self.encoder_attn = MultiheadAttention(
605
+ c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
606
+ )
607
+ self.layer_norm3 = LayerNorm(c)
608
+ self.ffn = TransformerFFNLayer(
609
+ c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
610
+
611
+ def forward(
612
+ self,
613
+ x,
614
+ encoder_out=None,
615
+ encoder_padding_mask=None,
616
+ incremental_state=None,
617
+ self_attn_mask=None,
618
+ self_attn_padding_mask=None,
619
+ attn_out=None,
620
+ reset_attn_weight=None,
621
+ **kwargs,
622
+ ):
623
+ layer_norm_training = kwargs.get('layer_norm_training', None)
624
+ if layer_norm_training is not None:
625
+ self.layer_norm1.training = layer_norm_training
626
+ self.layer_norm2.training = layer_norm_training
627
+ self.layer_norm3.training = layer_norm_training
628
+ residual = x
629
+ x = self.layer_norm1(x)
630
+ x, _ = self.self_attn(
631
+ query=x,
632
+ key=x,
633
+ value=x,
634
+ key_padding_mask=self_attn_padding_mask,
635
+ incremental_state=incremental_state,
636
+ attn_mask=self_attn_mask
637
+ )
638
+ x = F.dropout(x, self.dropout, training=self.training)
639
+ x = residual + x
640
+
641
+ residual = x
642
+ x = self.layer_norm2(x)
643
+ if encoder_out is not None:
644
+ x, attn = self.encoder_attn(
645
+ query=x,
646
+ key=encoder_out,
647
+ value=encoder_out,
648
+ key_padding_mask=encoder_padding_mask,
649
+ incremental_state=incremental_state,
650
+ static_kv=True,
651
+ enc_dec_attn_constraint_mask=None, #utils.get_incremental_state(self, incremental_state, 'enc_dec_attn_constraint_mask'),
652
+ reset_attn_weight=reset_attn_weight
653
+ )
654
+ attn_logits = attn[1]
655
+ else:
656
+ assert attn_out is not None
657
+ x = self.encoder_attn.in_proj_v(attn_out.transpose(0, 1))
658
+ attn_logits = None
659
+ x = F.dropout(x, self.dropout, training=self.training)
660
+ x = residual + x
661
+
662
+ residual = x
663
+ x = self.layer_norm3(x)
664
+ x = self.ffn(x, incremental_state=incremental_state)
665
+ x = F.dropout(x, self.dropout, training=self.training)
666
+ x = residual + x
667
+ # if len(attn_logits.size()) > 3:
668
+ # indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1)
669
+ # attn_logits = attn_logits.gather(1,
670
+ # indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1)
671
+ return x, attn_logits
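The layers above all operate on [T, B, C] activations with a [B, T] padding mask. A minimal shape check for EncSALayer (illustrative sketch; it assumes the repository root is on PYTHONPATH and the sizes are arbitrary):

    import torch
    from modules.commons.common_layers import EncSALayer

    layer = EncSALayer(c=256, num_heads=2, dropout=0.1, kernel_size=9)  # defaults: padding='SAME', norm='ln', act='gelu'
    x = torch.randn(50, 4, 256)                  # [T, B, C]
    mask = torch.zeros(4, 50, dtype=torch.bool)  # [B, T]; True marks padded frames
    y = layer(x, encoder_padding_mask=mask)
    print(y.shape)                               # torch.Size([50, 4, 256])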
modules/commons/espnet_positional_embedding.py ADDED
@@ -0,0 +1,113 @@
1
+ import math
2
+ import torch
3
+
4
+
5
+ class PositionalEncoding(torch.nn.Module):
6
+ """Positional encoding.
7
+ Args:
8
+ d_model (int): Embedding dimension.
9
+ dropout_rate (float): Dropout rate.
10
+ max_len (int): Maximum input length.
11
+ reverse (bool): Whether to reverse the input position.
12
+ """
13
+
14
+ def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
15
+ """Construct a PositionalEncoding object."""
16
+ super(PositionalEncoding, self).__init__()
17
+ self.d_model = d_model
18
+ self.reverse = reverse
19
+ self.xscale = math.sqrt(self.d_model)
20
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
21
+ self.pe = None
22
+ self.extend_pe(torch.tensor(0.0).expand(1, max_len))
23
+
24
+ def extend_pe(self, x):
25
+ """Reset the positional encodings."""
26
+ if self.pe is not None:
27
+ if self.pe.size(1) >= x.size(1):
28
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
29
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
30
+ return
31
+ pe = torch.zeros(x.size(1), self.d_model)
32
+ if self.reverse:
33
+ position = torch.arange(
34
+ x.size(1) - 1, -1, -1.0, dtype=torch.float32
35
+ ).unsqueeze(1)
36
+ else:
37
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
38
+ div_term = torch.exp(
39
+ torch.arange(0, self.d_model, 2, dtype=torch.float32)
40
+ * -(math.log(10000.0) / self.d_model)
41
+ )
42
+ pe[:, 0::2] = torch.sin(position * div_term)
43
+ pe[:, 1::2] = torch.cos(position * div_term)
44
+ pe = pe.unsqueeze(0)
45
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
46
+
47
+ def forward(self, x: torch.Tensor):
48
+ """Add positional encoding.
49
+ Args:
50
+ x (torch.Tensor): Input tensor (batch, time, `*`).
51
+ Returns:
52
+ torch.Tensor: Encoded tensor (batch, time, `*`).
53
+ """
54
+ self.extend_pe(x)
55
+ x = x * self.xscale + self.pe[:, : x.size(1)]
56
+ return self.dropout(x)
57
+
58
+
59
+ class ScaledPositionalEncoding(PositionalEncoding):
60
+ """Scaled positional encoding module.
61
+ See Sec. 3.2 https://arxiv.org/abs/1809.08895
62
+ Args:
63
+ d_model (int): Embedding dimension.
64
+ dropout_rate (float): Dropout rate.
65
+ max_len (int): Maximum input length.
66
+ """
67
+
68
+ def __init__(self, d_model, dropout_rate, max_len=5000):
69
+ """Initialize class."""
70
+ super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
71
+ self.alpha = torch.nn.Parameter(torch.tensor(1.0))
72
+
73
+ def reset_parameters(self):
74
+ """Reset parameters."""
75
+ self.alpha.data = torch.tensor(1.0)
76
+
77
+ def forward(self, x):
78
+ """Add positional encoding.
79
+ Args:
80
+ x (torch.Tensor): Input tensor (batch, time, `*`).
81
+ Returns:
82
+ torch.Tensor: Encoded tensor (batch, time, `*`).
83
+ """
84
+ self.extend_pe(x)
85
+ x = x + self.alpha * self.pe[:, : x.size(1)]
86
+ return self.dropout(x)
87
+
88
+
89
+ class RelPositionalEncoding(PositionalEncoding):
90
+ """Relative positional encoding module.
91
+ See : Appendix B in https://arxiv.org/abs/1901.02860
92
+ Args:
93
+ d_model (int): Embedding dimension.
94
+ dropout_rate (float): Dropout rate.
95
+ max_len (int): Maximum input length.
96
+ """
97
+
98
+ def __init__(self, d_model, dropout_rate, max_len=5000):
99
+ """Initialize class."""
100
+ super().__init__(d_model, dropout_rate, max_len, reverse=True)
101
+
102
+ def forward(self, x):
103
+ """Compute positional encoding.
104
+ Args:
105
+ x (torch.Tensor): Input tensor (batch, time, `*`).
106
+ Returns:
107
+ torch.Tensor: Encoded tensor (batch, time, `*`).
108
+ torch.Tensor: Positional embedding tensor (1, time, `*`).
109
+ """
110
+ self.extend_pe(x)
111
+ x = x * self.xscale
112
+ pos_emb = self.pe[:, : x.size(1)]
113
+ return self.dropout(x) + self.dropout(pos_emb)
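All three modules add the sinusoidal table pe[pos, 2i] = sin(pos / 10000^(2i/d_model)), pe[pos, 2i+1] = cos(pos / 10000^(2i/d_model)) to the input; they differ in whether the input is scaled by sqrt(d_model) or by a learned alpha, and RelPositionalEncoding indexes positions in reverse. A quick shape check (illustrative sketch; d_model and sequence length are arbitrary):

    import torch
    from modules.commons.espnet_positional_embedding import PositionalEncoding, RelPositionalEncoding

    x = torch.randn(2, 50, 256)  # (batch, time, d_model)
    pe = PositionalEncoding(d_model=256, dropout_rate=0.0)
    print(pe(x).shape)           # torch.Size([2, 50, 256]): x * sqrt(256) + pe[:, :50]
    rel = RelPositionalEncoding(d_model=256, dropout_rate=0.0)
    print(rel(x).shape)          # torch.Size([2, 50, 256])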
modules/commons/ssim.py ADDED
@@ -0,0 +1,391 @@
1
+ # '''
2
+ # https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py
3
+ # '''
4
+ #
5
+ # import torch
6
+ # import torch.jit
7
+ # import torch.nn.functional as F
8
+ #
9
+ #
10
+ # @torch.jit.script
11
+ # def create_window(window_size: int, sigma: float, channel: int):
12
+ # '''
13
+ # Create 1-D gauss kernel
14
+ # :param window_size: the size of gauss kernel
15
+ # :param sigma: sigma of normal distribution
16
+ # :param channel: input channel
17
+ # :return: 1D kernel
18
+ # '''
19
+ # coords = torch.arange(window_size, dtype=torch.float)
20
+ # coords -= window_size // 2
21
+ #
22
+ # g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
23
+ # g /= g.sum()
24
+ #
25
+ # g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
26
+ # return g
27
+ #
28
+ #
29
+ # @torch.jit.script
30
+ # def _gaussian_filter(x, window_1d, use_padding: bool):
31
+ # '''
32
+ # Blur input with 1-D kernel
33
+ # :param x: batch of tensors to be blured
34
+ # :param window_1d: 1-D gauss kernel
35
+ # :param use_padding: padding image before conv
36
+ # :return: blured tensors
37
+ # '''
38
+ # C = x.shape[1]
39
+ # padding = 0
40
+ # if use_padding:
41
+ # window_size = window_1d.shape[3]
42
+ # padding = window_size // 2
43
+ # out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
44
+ # out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
45
+ # return out
46
+ #
47
+ #
48
+ # @torch.jit.script
49
+ # def ssim(X, Y, window, data_range: float, use_padding: bool = False):
50
+ # '''
51
+ # Calculate ssim index for X and Y
52
+ # :param X: images [B, C, H, N_bins]
53
+ # :param Y: images [B, C, H, N_bins]
54
+ # :param window: 1-D gauss kernel
55
+ # :param data_range: value range of input images. (usually 1.0 or 255)
56
+ # :param use_padding: padding image before conv
57
+ # :return:
58
+ # '''
59
+ #
60
+ # K1 = 0.01
61
+ # K2 = 0.03
62
+ # compensation = 1.0
63
+ #
64
+ # C1 = (K1 * data_range) ** 2
65
+ # C2 = (K2 * data_range) ** 2
66
+ #
67
+ # mu1 = _gaussian_filter(X, window, use_padding)
68
+ # mu2 = _gaussian_filter(Y, window, use_padding)
69
+ # sigma1_sq = _gaussian_filter(X * X, window, use_padding)
70
+ # sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
71
+ # sigma12 = _gaussian_filter(X * Y, window, use_padding)
72
+ #
73
+ # mu1_sq = mu1.pow(2)
74
+ # mu2_sq = mu2.pow(2)
75
+ # mu1_mu2 = mu1 * mu2
76
+ #
77
+ # sigma1_sq = compensation * (sigma1_sq - mu1_sq)
78
+ # sigma2_sq = compensation * (sigma2_sq - mu2_sq)
79
+ # sigma12 = compensation * (sigma12 - mu1_mu2)
80
+ #
81
+ # cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
82
+ # # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan.
83
+ # cs_map = cs_map.clamp_min(0.)
84
+ # ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
85
+ #
86
+ # ssim_val = ssim_map.mean(dim=(1, 2, 3)) # reduce along CHW
87
+ # cs = cs_map.mean(dim=(1, 2, 3))
88
+ #
89
+ # return ssim_val, cs
90
+ #
91
+ #
92
+ # @torch.jit.script
93
+ # def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8):
94
+ # '''
95
+ # interface of ms-ssim
96
+ # :param X: a batch of images, (N,C,H,W)
97
+ # :param Y: a batch of images, (N,C,H,W)
98
+ # :param window: 1-D gauss kernel
99
+ # :param data_range: value range of input images. (usually 1.0 or 255)
100
+ # :param weights: weights for different levels
101
+ # :param use_padding: padding image before conv
102
+ # :param eps: use for avoid grad nan.
103
+ # :return:
104
+ # '''
105
+ # levels = weights.shape[0]
106
+ # cs_vals = []
107
+ # ssim_vals = []
108
+ # for _ in range(levels):
109
+ # ssim_val, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding)
110
+ # # Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf.
111
+ # ssim_val = ssim_val.clamp_min(eps)
112
+ # cs = cs.clamp_min(eps)
113
+ # cs_vals.append(cs)
114
+ #
115
+ # ssim_vals.append(ssim_val)
116
+ # padding = (X.shape[2] % 2, X.shape[3] % 2)
117
+ # X = F.avg_pool2d(X, kernel_size=2, stride=2, padding=padding)
118
+ # Y = F.avg_pool2d(Y, kernel_size=2, stride=2, padding=padding)
119
+ #
120
+ # cs_vals = torch.stack(cs_vals, dim=0)
121
+ # ms_ssim_val = torch.prod((cs_vals[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_vals[-1] ** weights[-1]), dim=0)
122
+ # return ms_ssim_val
123
+ #
124
+ #
125
+ # class SSIM(torch.jit.ScriptModule):
126
+ # __constants__ = ['data_range', 'use_padding']
127
+ #
128
+ # def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False):
129
+ # '''
130
+ # :param window_size: the size of gauss kernel
131
+ # :param window_sigma: sigma of normal distribution
132
+ # :param data_range: value range of input images. (usually 1.0 or 255)
133
+ # :param channel: input channels (default: 3)
134
+ # :param use_padding: padding image before conv
135
+ # '''
136
+ # super().__init__()
137
+ # assert window_size % 2 == 1, 'Window size must be odd.'
138
+ # window = create_window(window_size, window_sigma, channel)
139
+ # self.register_buffer('window', window)
140
+ # self.data_range = data_range
141
+ # self.use_padding = use_padding
142
+ #
143
+ # @torch.jit.script_method
144
+ # def forward(self, X, Y):
145
+ # r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding)
146
+ # return r[0]
147
+ #
148
+ #
149
+ # class MS_SSIM(torch.jit.ScriptModule):
150
+ # __constants__ = ['data_range', 'use_padding', 'eps']
151
+ #
152
+ # def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None,
153
+ # levels=None, eps=1e-8):
154
+ # '''
155
+ # class for ms-ssim
156
+ # :param window_size: the size of gauss kernel
157
+ # :param window_sigma: sigma of normal distribution
158
+ # :param data_range: value range of input images. (usually 1.0 or 255)
159
+ # :param channel: input channels
160
+ # :param use_padding: padding image before conv
161
+ # :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
162
+ # :param levels: number of downsampling
163
+ # :param eps: Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf.
164
+ # '''
165
+ # super().__init__()
166
+ # assert window_size % 2 == 1, 'Window size must be odd.'
167
+ # self.data_range = data_range
168
+ # self.use_padding = use_padding
169
+ # self.eps = eps
170
+ #
171
+ # window = create_window(window_size, window_sigma, channel)
172
+ # self.register_buffer('window', window)
173
+ #
174
+ # if weights is None:
175
+ # weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
176
+ # weights = torch.tensor(weights, dtype=torch.float)
177
+ #
178
+ # if levels is not None:
179
+ # weights = weights[:levels]
180
+ # weights = weights / weights.sum()
181
+ #
182
+ # self.register_buffer('weights', weights)
183
+ #
184
+ # @torch.jit.script_method
185
+ # def forward(self, X, Y):
186
+ # return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights,
187
+ # use_padding=self.use_padding, eps=self.eps)
188
+ #
189
+ #
190
+ # if __name__ == '__main__':
191
+ # print('Simple Test')
192
+ # im = torch.randint(0, 255, (5, 3, 256, 256), dtype=torch.float, device='cuda')
193
+ # img1 = im / 255
194
+ # img2 = img1 * 0.5
195
+ #
196
+ # losser = SSIM(data_range=1.).cuda()
197
+ # loss = losser(img1, img2).mean()
198
+ #
199
+ # losser2 = MS_SSIM(data_range=1.).cuda()
200
+ # loss2 = losser2(img1, img2).mean()
201
+ #
202
+ # print(loss.item())
203
+ # print(loss2.item())
204
+ #
205
+ # if __name__ == '__main__':
206
+ # print('Training Test')
207
+ # import cv2
208
+ # import torch.optim
209
+ # import numpy as np
210
+ # import imageio
211
+ # import time
212
+ #
213
+ # out_test_video = False
214
+ # # Better not to write a GIF directly (it gets very large); write an mkv file first, then convert it to GIF with ffmpeg
215
+ # video_use_gif = False
216
+ #
217
+ # im = cv2.imread('test_img1.jpg', 1)
218
+ # t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255.
219
+ #
220
+ # if out_test_video:
221
+ # if video_use_gif:
222
+ # fps = 0.5
223
+ # out_wh = (im.shape[1] // 2, im.shape[0] // 2)
224
+ # suffix = '.gif'
225
+ # else:
226
+ # fps = 5
227
+ # out_wh = (im.shape[1], im.shape[0])
228
+ # suffix = '.mkv'
229
+ # video_last_time = time.perf_counter()
230
+ # video = imageio.get_writer('ssim_test' + suffix, fps=fps)
231
+ #
232
+ # # Test SSIM
233
+ # print('Training SSIM')
234
+ # rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
235
+ # rand_im.requires_grad = True
236
+ # optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
237
+ # losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda()
238
+ # ssim_score = 0
239
+ # while ssim_score < 0.999:
240
+ # optim.zero_grad()
241
+ # loss = losser(rand_im, t_im)
242
+ # (-loss).sum().backward()
243
+ # ssim_score = loss.item()
244
+ # optim.step()
245
+ # r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
246
+ # r_im = cv2.putText(r_im, 'ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
247
+ #
248
+ # if out_test_video:
249
+ # if time.perf_counter() - video_last_time > 1. / fps:
250
+ # video_last_time = time.perf_counter()
251
+ # out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB)
252
+ # out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA)
253
+ # if isinstance(out_frame, cv2.UMat):
254
+ # out_frame = out_frame.get()
255
+ # video.append_data(out_frame)
256
+ #
257
+ # cv2.imshow('ssim', r_im)
258
+ # cv2.setWindowTitle('ssim', 'ssim %f' % ssim_score)
259
+ # cv2.waitKey(1)
260
+ #
261
+ # if out_test_video:
262
+ # video.close()
263
+ #
264
+ # # Test MS-SSIM
265
+ # if out_test_video:
266
+ # if video_use_gif:
267
+ # fps = 0.5
268
+ # out_wh = (im.shape[1] // 2, im.shape[0] // 2)
269
+ # suffix = '.gif'
270
+ # else:
271
+ # fps = 5
272
+ # out_wh = (im.shape[1], im.shape[0])
273
+ # suffix = '.mkv'
274
+ # video_last_time = time.perf_counter()
275
+ # video = imageio.get_writer('ms_ssim_test' + suffix, fps=fps)
276
+ #
277
+ # print('Training MS_SSIM')
278
+ # rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
279
+ # rand_im.requires_grad = True
280
+ # optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
281
+ # losser = MS_SSIM(data_range=1., channel=t_im.shape[1]).cuda()
282
+ # ssim_score = 0
283
+ # while ssim_score < 0.999:
284
+ # optim.zero_grad()
285
+ # loss = losser(rand_im, t_im)
286
+ # (-loss).sum().backward()
287
+ # ssim_score = loss.item()
288
+ # optim.step()
289
+ # r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
290
+ # r_im = cv2.putText(r_im, 'ms_ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
291
+ #
292
+ # if out_test_video:
293
+ # if time.perf_counter() - video_last_time > 1. / fps:
294
+ # video_last_time = time.perf_counter()
295
+ # out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB)
296
+ # out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA)
297
+ # if isinstance(out_frame, cv2.UMat):
298
+ # out_frame = out_frame.get()
299
+ # video.append_data(out_frame)
300
+ #
301
+ # cv2.imshow('ms_ssim', r_im)
302
+ # cv2.setWindowTitle('ms_ssim', 'ms_ssim %f' % ssim_score)
303
+ # cv2.waitKey(1)
304
+ #
305
+ # if out_test_video:
306
+ # video.close()
307
+
308
+ """
309
+ Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim
310
+ """
311
+
312
+ import torch
313
+ import torch.nn.functional as F
314
+ from torch.autograd import Variable
315
+ import numpy as np
316
+ from math import exp
317
+
318
+
319
+ def gaussian(window_size, sigma):
320
+ gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
321
+ return gauss / gauss.sum()
322
+
323
+
324
+ def create_window(window_size, channel):
325
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
326
+ _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
327
+ window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
328
+ return window
329
+
330
+
331
+ def _ssim(img1, img2, window, window_size, channel, size_average=True):
332
+ mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
333
+ mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
334
+
335
+ mu1_sq = mu1.pow(2)
336
+ mu2_sq = mu2.pow(2)
337
+ mu1_mu2 = mu1 * mu2
338
+
339
+ sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
340
+ sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
341
+ sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
342
+
343
+ C1 = 0.01 ** 2
344
+ C2 = 0.03 ** 2
345
+
346
+ ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
347
+
348
+ if size_average:
349
+ return ssim_map.mean()
350
+ else:
351
+ return ssim_map.mean(1)
352
+
353
+
354
+ class SSIM(torch.nn.Module):
355
+ def __init__(self, window_size=11, size_average=True):
356
+ super(SSIM, self).__init__()
357
+ self.window_size = window_size
358
+ self.size_average = size_average
359
+ self.channel = 1
360
+ self.window = create_window(window_size, self.channel)
361
+
362
+ def forward(self, img1, img2):
363
+ (_, channel, _, _) = img1.size()
364
+
365
+ if channel == self.channel and self.window.data.type() == img1.data.type():
366
+ window = self.window
367
+ else:
368
+ window = create_window(self.window_size, channel)
369
+
370
+ if img1.is_cuda:
371
+ window = window.cuda(img1.get_device())
372
+ window = window.type_as(img1)
373
+
374
+ self.window = window
375
+ self.channel = channel
376
+
377
+ return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
378
+
379
+
380
+ window = None
381
+
382
+
383
+ def ssim(img1, img2, window_size=11, size_average=True):
384
+ (_, channel, _, _) = img1.size()
385
+ global window
386
+ if window is None:
387
+ window = create_window(window_size, channel)
388
+ if img1.is_cuda:
389
+ window = window.cuda(img1.get_device())
390
+ window = window.type_as(img1)
391
+ return _ssim(img1, img2, window, window_size, channel, size_average)
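The active implementation (the pytorch-ssim port after the commented-out block) computes the windowed map ((2*mu1*mu2 + C1) * (2*sigma12 + C2)) / ((mu1^2 + mu2^2 + C1) * (sigma1^2 + sigma2^2 + C2)) with C1 = 0.01^2 and C2 = 0.03^2, so inputs are expected in [0, 1]. A small sanity check on mel-shaped tensors (illustrative sketch):

    import torch
    from modules.commons.ssim import ssim, SSIM

    a = torch.rand(4, 1, 100, 80)  # [B, C, T, n_mels], values in [0, 1]
    print(ssim(a, a.clone()).item())                           # ~1.0 for identical inputs
    print(SSIM(window_size=11)(a, torch.rand_like(a)).item())  # well below 1 for unrelated inputs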
modules/fastspeech/__pycache__/fs2.cpython-38.pyc ADDED
Binary file (5.87 kB). View file
 
modules/fastspeech/__pycache__/pe.cpython-38.pyc ADDED
Binary file (5.05 kB). View file
 
modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc ADDED
Binary file (13.6 kB). View file
 
modules/fastspeech/fs2.py ADDED
@@ -0,0 +1,255 @@
1
+ from modules.commons.common_layers import *
2
+ from modules.commons.common_layers import Embedding
3
+ from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \
4
+ EnergyPredictor, FastspeechEncoder
5
+ from utils.cwt import cwt2f0
6
+ from utils.hparams import hparams
7
+ from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0
8
+
9
+ FS_ENCODERS = {
10
+ 'fft': lambda hp: FastspeechEncoder(
11
+ hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'],
12
+ num_heads=hp['num_heads']),
13
+ }
14
+
15
+ FS_DECODERS = {
16
+ 'fft': lambda hp: FastspeechDecoder(
17
+ hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
18
+ }
19
+
20
+
21
+ class FastSpeech2(nn.Module):
22
+ def __init__(self, dictionary, out_dims=None):
23
+ super().__init__()
24
+ # self.dictionary = dictionary
25
+ self.padding_idx = 0
26
+ if not hparams.get('no_fs2', False):
27
+ self.enc_layers = hparams['enc_layers']
28
+ self.dec_layers = hparams['dec_layers']
29
+ self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams)
30
+ self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
31
+ self.hidden_size = hparams['hidden_size']
32
+ # self.encoder_embed_tokens = self.build_embedding(self.dictionary, self.hidden_size)
33
+ self.out_dims = out_dims
34
+ if out_dims is None:
35
+ self.out_dims = hparams['audio_num_mel_bins']
36
+ self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
37
+ #=========not used===========
38
+ # if hparams['use_spk_id']:
39
+ # self.spk_embed_proj = Embedding(hparams['num_spk'] + 1, self.hidden_size)
40
+ # if hparams['use_split_spk_id']:
41
+ # self.spk_embed_f0 = Embedding(hparams['num_spk'] + 1, self.hidden_size)
42
+ # self.spk_embed_dur = Embedding(hparams['num_spk'] + 1, self.hidden_size)
43
+ # elif hparams['use_spk_embed']:
44
+ # self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
45
+ predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
46
+ # self.dur_predictor = DurationPredictor(
47
+ # self.hidden_size,
48
+ # n_chans=predictor_hidden,
49
+ # n_layers=hparams['dur_predictor_layers'],
50
+ # dropout_rate=hparams['predictor_dropout'], padding=hparams['ffn_padding'],
51
+ # kernel_size=hparams['dur_predictor_kernel'])
52
+ # self.length_regulator = LengthRegulator()
53
+ if hparams['use_pitch_embed']:
54
+ self.pitch_embed = Embedding(300, self.hidden_size, self.padding_idx)
55
+ if hparams['pitch_type'] == 'cwt':
56
+ h = hparams['cwt_hidden_size']
57
+ cwt_out_dims = 10
58
+ if hparams['use_uv']:
59
+ cwt_out_dims = cwt_out_dims + 1
60
+ self.cwt_predictor = nn.Sequential(
61
+ nn.Linear(self.hidden_size, h),
62
+ PitchPredictor(
63
+ h,
64
+ n_chans=predictor_hidden,
65
+ n_layers=hparams['predictor_layers'],
66
+ dropout_rate=hparams['predictor_dropout'], odim=cwt_out_dims,
67
+ padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel']))
68
+ self.cwt_stats_layers = nn.Sequential(
69
+ nn.Linear(self.hidden_size, h), nn.ReLU(),
70
+ nn.Linear(h, h), nn.ReLU(), nn.Linear(h, 2)
71
+ )
72
+ else:
73
+ self.pitch_predictor = PitchPredictor(
74
+ self.hidden_size,
75
+ n_chans=predictor_hidden,
76
+ n_layers=hparams['predictor_layers'],
77
+ dropout_rate=hparams['predictor_dropout'],
78
+ odim=2 if hparams['pitch_type'] == 'frame' else 1,
79
+ padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
80
+ if hparams['use_energy_embed']:
81
+ self.energy_embed = Embedding(256, self.hidden_size, self.padding_idx)
82
+ # self.energy_predictor = EnergyPredictor(
83
+ # self.hidden_size,
84
+ # n_chans=predictor_hidden,
85
+ # n_layers=hparams['predictor_layers'],
86
+ # dropout_rate=hparams['predictor_dropout'], odim=1,
87
+ # padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
88
+
89
+ # def build_embedding(self, dictionary, embed_dim):
90
+ # num_embeddings = len(dictionary)
91
+ # emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
92
+ # return emb
93
+
94
+ def forward(self, hubert, mel2ph=None, spk_embed=None,
95
+ ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=True,
96
+ spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs):
97
+ ret = {}
98
+ if not hparams.get('no_fs2', False):
99
+ encoder_out = self.encoder(hubert)  # [B, T, C]
100
+ else:
101
+ encoder_out = hubert
102
+ src_nonpadding = (hubert != 0).any(-1)[:, :, None]
103
+
104
+ # add ref style embed
105
+ # Not implemented
106
+ # variance encoder
107
+ var_embed = 0
108
+
109
+ # encoder_out_dur denotes encoder outputs for duration predictor
110
+ # in speech adaptation, duration predictor use old speaker embedding
111
+ if hparams['use_spk_embed']:
112
+ spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
113
+ elif hparams['use_spk_id']:
114
+ spk_embed_id = spk_embed
115
+ if spk_embed_dur_id is None:
116
+ spk_embed_dur_id = spk_embed_id
117
+ if spk_embed_f0_id is None:
118
+ spk_embed_f0_id = spk_embed_id
119
+ spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :]
120
+ spk_embed_dur = spk_embed_f0 = spk_embed
121
+ if hparams['use_split_spk_id']:
122
+ spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :]
123
+ spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :]
124
+ else:
125
+ spk_embed_dur = spk_embed_f0 = spk_embed = 0
126
+
127
+ # add dur
128
+ # dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding
129
+
130
+ # mel2ph = self.add_dur(dur_inp, mel2ph, hubert, ret)
131
+ ret['mel2ph'] = mel2ph
132
+
133
+ decoder_inp = F.pad(encoder_out, [0, 0, 1, 0])
134
+
135
+ mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]])
136
+ decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_) # [B, T, H]
137
+
138
+ tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
139
+
140
+ # add pitch and energy embed
141
+ pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding
142
+ if hparams['use_pitch_embed']:
143
+ pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding
144
+ decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph)
145
+ if hparams['use_energy_embed']:
146
+ decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret)
147
+
148
+ ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding
149
+ if not hparams.get('no_fs2', False):
150
+ if skip_decoder:
151
+ return ret
152
+ ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
153
+
154
+ return ret
155
+
156
+ def add_dur(self, dur_input, mel2ph, hubert, ret):
157
+ src_padding = (hubert==0).all(-1)
158
+ dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
159
+ if mel2ph is None:
160
+ dur, xs = self.dur_predictor.inference(dur_input, src_padding)
161
+ ret['dur'] = xs
162
+ ret['dur_choice'] = dur
163
+ mel2ph = self.length_regulator(dur, src_padding).detach()
164
+ else:
165
+ ret['dur'] = self.dur_predictor(dur_input, src_padding)
166
+ ret['mel2ph'] = mel2ph
167
+ return mel2ph
168
+
169
+ def run_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs):
170
+ x = decoder_inp # [B, T, H]
171
+ x = self.decoder(x)
172
+ x = self.mel_out(x)
173
+ return x * tgt_nonpadding
174
+
175
+ def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
176
+ f0 = cwt2f0(cwt_spec, mean, std, hparams['cwt_scales'])
177
+ f0 = torch.cat(
178
+ [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
179
+ f0_norm = norm_f0(f0, None, hparams)
180
+ return f0_norm
181
+
182
+ def out2mel(self, out):
183
+ return out
184
+
185
+ def add_pitch(self,decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
186
+ # if hparams['pitch_type'] == 'ph':
187
+ # pitch_pred_inp = encoder_out.detach() + hparams['predictor_grad'] * (encoder_out - encoder_out.detach())
188
+ # pitch_padding = (encoder_out.sum().abs() == 0)
189
+ # ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp)
190
+ # if f0 is None:
191
+ # f0 = pitch_pred[:, :, 0]
192
+ # ret['f0_denorm'] = f0_denorm = denorm_f0(f0, None, hparams, pitch_padding=pitch_padding)
193
+ # pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt]
194
+ # pitch = F.pad(pitch, [1, 0])
195
+ # pitch = torch.gather(pitch, 1, mel2ph) # [B, T_mel]
196
+ # pitch_embedding = pitch_embed(pitch)
197
+ # return pitch_embedding
198
+
199
+ decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
200
+
201
+ pitch_padding = (mel2ph == 0)
202
+
203
+ # if hparams['pitch_type'] == 'cwt':
204
+ # # NOTE: this part of script is *isolated* from other scripts, which means
205
+ # # it may not be compatible with the current version.
206
+ # pass
207
+ # # pitch_padding = None
208
+ # # ret['cwt'] = cwt_out = self.cwt_predictor(decoder_inp)
209
+ # # stats_out = self.cwt_stats_layers(encoder_out[:, 0, :]) # [B, 2]
210
+ # # mean = ret['f0_mean'] = stats_out[:, 0]
211
+ # # std = ret['f0_std'] = stats_out[:, 1]
212
+ # # cwt_spec = cwt_out[:, :, :10]
213
+ # # if f0 is None:
214
+ # # std = std * hparams['cwt_std_scale']
215
+ # # f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
216
+ # # if hparams['use_uv']:
217
+ # # assert cwt_out.shape[-1] == 11
218
+ # # uv = cwt_out[:, :, -1] > 0
219
+ # elif hparams['pitch_ar']:
220
+ # ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp, f0 if is_training else None)
221
+ # if f0 is None:
222
+ # f0 = pitch_pred[:, :, 0]
223
+ # else:
224
+ #ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp)
225
+ # if f0 is None:
226
+ # f0 = pitch_pred[:, :, 0]
227
+ # if hparams['use_uv'] and uv is None:
228
+ # uv = pitch_pred[:, :, 1] > 0
229
+ ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv, hparams, pitch_padding=pitch_padding)
230
+ if pitch_padding is not None:
231
+ f0[pitch_padding] = 0
232
+
233
+ pitch = f0_to_coarse(f0_denorm,hparams) # start from 0
234
+ ret['pitch_pred']=pitch.unsqueeze(-1)
235
+ # print(ret['pitch_pred'].shape)
236
+ # print(pitch.shape)
237
+ pitch_embedding = self.pitch_embed(pitch)
238
+ return pitch_embedding
239
+
240
+ def add_energy(self,decoder_inp, energy, ret):
241
+ decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
242
+ ret['energy_pred'] = energy#energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
243
+ # if energy is None:
244
+ # energy = energy_pred
245
+ energy = torch.clamp(energy * 256 // 4, max=255).long() # energy_to_coarse
246
+ energy_embedding = self.energy_embed(energy)
247
+ return energy_embedding
248
+
249
+ @staticmethod
250
+ def mel_norm(x):
251
+ return (x + 5.5) / (6.3 / 2) - 1
252
+
253
+ @staticmethod
254
+ def mel_denorm(x):
255
+ return (x + 1) * (6.3 / 2) - 5.5
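mel_norm and mel_denorm are a fixed affine pair: (x + 5.5) / 3.15 - 1 maps log-mel values in roughly [-5.5, 0.8] onto [-1, 1], and mel_denorm undoes it exactly. A quick numeric check (self-contained sketch of the same arithmetic):

    def mel_norm(x):
        return (x + 5.5) / (6.3 / 2) - 1

    def mel_denorm(x):
        return (x + 1) * (6.3 / 2) - 5.5

    for v in (-5.5, -2.35, 0.8):
        assert abs(mel_denorm(mel_norm(v)) - v) < 1e-9
        print(v, '->', mel_norm(v))  # -5.5 -> -1.0, -2.35 -> 0.0, 0.8 -> 1.0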
modules/fastspeech/pe.py ADDED
@@ -0,0 +1,149 @@
1
+ from modules.commons.common_layers import *
2
+ from utils.hparams import hparams
3
+ from modules.fastspeech.tts_modules import PitchPredictor
4
+ from utils.pitch_utils import denorm_f0
5
+
6
+
7
+ class Prenet(nn.Module):
8
+ def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None):
9
+ super(Prenet, self).__init__()
10
+ padding = kernel // 2
11
+ self.layers = []
12
+ self.strides = strides if strides is not None else [1] * n_layers
13
+ for l in range(n_layers):
14
+ self.layers.append(nn.Sequential(
15
+ nn.Conv1d(in_dim, out_dim, kernel_size=kernel, padding=padding, stride=self.strides[l]),
16
+ nn.ReLU(),
17
+ nn.BatchNorm1d(out_dim)
18
+ ))
19
+ in_dim = out_dim
20
+ self.layers = nn.ModuleList(self.layers)
21
+ self.out_proj = nn.Linear(out_dim, out_dim)
22
+
23
+ def forward(self, x):
24
+ """
25
+
26
+ :param x: [B, T, 80]
27
+ :return: [L, B, T, H], [B, T, H]
28
+ """
29
+ # padding_mask = x.abs().sum(-1).eq(0).data # [B, T]
30
+ padding_mask = x.abs().sum(-1).eq(0).detach()
31
+ nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :] # [B, 1, T]
32
+ x = x.transpose(1, 2)
33
+ hiddens = []
34
+ for i, l in enumerate(self.layers):
35
+ nonpadding_mask_TB = nonpadding_mask_TB[:, :, ::self.strides[i]]
36
+ x = l(x) * nonpadding_mask_TB
37
+ hiddens.append(x)
38
+ hiddens = torch.stack(hiddens, 0) # [L, B, H, T]
39
+ hiddens = hiddens.transpose(2, 3) # [L, B, T, H]
40
+ x = self.out_proj(x.transpose(1, 2)) # [B, T, H]
41
+ x = x * nonpadding_mask_TB.transpose(1, 2)
42
+ return hiddens, x
43
+
44
+
45
+ class ConvBlock(nn.Module):
46
+ def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0):
47
+ super().__init__()
48
+ self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride)
49
+ self.norm = norm
50
+ if self.norm == 'bn':
51
+ self.norm = nn.BatchNorm1d(n_chans)
52
+ elif self.norm == 'in':
53
+ self.norm = nn.InstanceNorm1d(n_chans, affine=True)
54
+ elif self.norm == 'gn':
55
+ self.norm = nn.GroupNorm(n_chans // 16, n_chans)
56
+ elif self.norm == 'ln':
57
+ self.norm = LayerNorm(n_chans // 16, n_chans)
58
+ elif self.norm == 'wn':
59
+ self.conv = torch.nn.utils.weight_norm(self.conv.conv)
60
+ self.dropout = nn.Dropout(dropout)
61
+ self.relu = nn.ReLU()
62
+
63
+ def forward(self, x):
64
+ """
65
+
66
+ :param x: [B, C, T]
67
+ :return: [B, C, T]
68
+ """
69
+ x = self.conv(x)
70
+ if not isinstance(self.norm, str):
71
+ if self.norm == 'none':
72
+ pass
73
+ elif self.norm == 'ln':
74
+ x = self.norm(x.transpose(1, 2)).transpose(1, 2)
75
+ else:
76
+ x = self.norm(x)
77
+ x = self.relu(x)
78
+ x = self.dropout(x)
79
+ return x
80
+
81
+
82
+ class ConvStacks(nn.Module):
83
+ def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn',
84
+ dropout=0, strides=None, res=True):
85
+ super().__init__()
86
+ self.conv = torch.nn.ModuleList()
87
+ self.kernel_size = kernel_size
88
+ self.res = res
89
+ self.in_proj = Linear(idim, n_chans)
90
+ if strides is None:
91
+ strides = [1] * n_layers
92
+ else:
93
+ assert len(strides) == n_layers
94
+ for idx in range(n_layers):
95
+ self.conv.append(ConvBlock(
96
+ n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout))
97
+ self.out_proj = Linear(n_chans, odim)
98
+
99
+ def forward(self, x, return_hiddens=False):
100
+ """
101
+
102
+ :param x: [B, T, H]
103
+ :return: [B, T, H]
104
+ """
105
+ x = self.in_proj(x)
106
+ x = x.transpose(1, -1) # (B, idim, Tmax)
107
+ hiddens = []
108
+ for f in self.conv:
109
+ x_ = f(x)
110
+ x = x + x_ if self.res else x_ # (B, C, Tmax)
111
+ hiddens.append(x)
112
+ x = x.transpose(1, -1)
113
+ x = self.out_proj(x) # (B, Tmax, H)
114
+ if return_hiddens:
115
+ hiddens = torch.stack(hiddens, 1) # [B, L, C, T]
116
+ return x, hiddens
117
+ return x
118
+
119
+
120
+ class PitchExtractor(nn.Module):
121
+ def __init__(self, n_mel_bins=80, conv_layers=2):
122
+ super().__init__()
123
+ self.hidden_size = hparams['hidden_size']
124
+ self.predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
125
+ self.conv_layers = conv_layers
126
+
127
+ self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1])
128
+ if self.conv_layers > 0:
129
+ self.mel_encoder = ConvStacks(
130
+ idim=self.hidden_size, n_chans=self.hidden_size, odim=self.hidden_size, n_layers=self.conv_layers)
131
+ self.pitch_predictor = PitchPredictor(
132
+ self.hidden_size, n_chans=self.predictor_hidden,
133
+ n_layers=5, dropout_rate=0.1, odim=2,
134
+ padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
135
+
136
+ def forward(self, mel_input=None):
137
+ ret = {}
138
+ mel_hidden = self.mel_prenet(mel_input)[1]
139
+ if self.conv_layers > 0:
140
+ mel_hidden = self.mel_encoder(mel_hidden)
141
+
142
+ ret['pitch_pred'] = pitch_pred = self.pitch_predictor(mel_hidden)
143
+
144
+ pitch_padding = mel_input.abs().sum(-1) == 0
145
+ use_uv = hparams['pitch_type'] == 'frame' #and hparams['use_uv']
146
+ ret['f0_denorm_pred'] = denorm_f0(
147
+ pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None,
148
+ hparams, pitch_padding=pitch_padding)
149
+ return ret
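PitchExtractor reads hidden_size, predictor_hidden, ffn_padding, predictor_kernel and pitch_type from hparams, so it needs a loaded config; the Prenet and ConvStacks building blocks above do not and can be shape-checked standalone (illustrative sketch; sizes are arbitrary):

    import torch
    from modules.fastspeech.pe import Prenet, ConvStacks

    mel = torch.randn(2, 200, 80)                    # [B, T, n_mels]
    hiddens, h = Prenet(in_dim=80, out_dim=256)(mel)
    print(hiddens.shape, h.shape)                    # [3, 2, 200, 256] and [2, 200, 256]
    print(ConvStacks(idim=256, odim=256)(h).shape)   # [2, 200, 256]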
modules/fastspeech/tts_modules.py ADDED
@@ -0,0 +1,364 @@
1
+ import logging
2
+ import math
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.nn import functional as F
7
+
8
+ from modules.commons.espnet_positional_embedding import RelPositionalEncoding
9
+ from modules.commons.common_layers import SinusoidalPositionalEmbedding, Linear, EncSALayer, DecSALayer, BatchNorm1dTBC
10
+ from utils.hparams import hparams
11
+
12
+ DEFAULT_MAX_SOURCE_POSITIONS = 2000
13
+ DEFAULT_MAX_TARGET_POSITIONS = 2000
14
+
15
+
16
+ class TransformerEncoderLayer(nn.Module):
17
+ def __init__(self, hidden_size, dropout, kernel_size=None, num_heads=2, norm='ln'):
18
+ super().__init__()
19
+ self.hidden_size = hidden_size
20
+ self.dropout = dropout
21
+ self.num_heads = num_heads
22
+ self.op = EncSALayer(
23
+ hidden_size, num_heads, dropout=dropout,
24
+ attention_dropout=0.0, relu_dropout=dropout,
25
+ kernel_size=kernel_size
26
+ if kernel_size is not None else hparams['enc_ffn_kernel_size'],
27
+ padding=hparams['ffn_padding'],
28
+ norm=norm, act=hparams['ffn_act'])
29
+
30
+ def forward(self, x, **kwargs):
31
+ return self.op(x, **kwargs)
32
+
33
+
34
+ ######################
35
+ # fastspeech modules
36
+ ######################
37
+ class LayerNorm(torch.nn.LayerNorm):
38
+ """Layer normalization module.
39
+ :param int nout: output dim size
40
+ :param int dim: dimension to be normalized
41
+ """
42
+
43
+ def __init__(self, nout, dim=-1):
44
+ """Construct an LayerNorm object."""
45
+ super(LayerNorm, self).__init__(nout, eps=1e-12)
46
+ self.dim = dim
47
+
48
+ def forward(self, x):
49
+ """Apply layer normalization.
50
+ :param torch.Tensor x: input tensor
51
+ :return: layer normalized tensor
52
+ :rtype torch.Tensor
53
+ """
54
+ if self.dim == -1:
55
+ return super(LayerNorm, self).forward(x)
56
+ return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
57
+
58
+
59
+ class DurationPredictor(torch.nn.Module):
60
+ """Duration predictor module.
61
+ This is a module of duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
62
+ The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder.
63
+ .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
64
+ https://arxiv.org/pdf/1905.09263.pdf
65
+ Note:
66
+ The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`,
67
+ the outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
68
+ """
69
+
70
+ def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'):
71
+ """Initialize duration predictor module.
72
+ Args:
73
+ idim (int): Input dimension.
74
+ n_layers (int, optional): Number of convolutional layers.
75
+ n_chans (int, optional): Number of channels of convolutional layers.
76
+ kernel_size (int, optional): Kernel size of convolutional layers.
77
+ dropout_rate (float, optional): Dropout rate.
78
+ offset (float, optional): Offset value to avoid nan in log domain.
79
+ """
80
+ super(DurationPredictor, self).__init__()
81
+ self.offset = offset
82
+ self.conv = torch.nn.ModuleList()
83
+ self.kernel_size = kernel_size
84
+ self.padding = padding
85
+ for idx in range(n_layers):
86
+ in_chans = idim if idx == 0 else n_chans
87
+ self.conv += [torch.nn.Sequential(
88
+ torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
89
+ if padding == 'SAME'
90
+ else (kernel_size - 1, 0), 0),
91
+ torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
92
+ torch.nn.ReLU(),
93
+ LayerNorm(n_chans, dim=1),
94
+ torch.nn.Dropout(dropout_rate)
95
+ )]
96
+ if hparams['dur_loss'] in ['mse', 'huber']:
97
+ odims = 1
98
+ elif hparams['dur_loss'] == 'mog':
99
+ odims = 15
100
+ elif hparams['dur_loss'] == 'crf':
101
+ odims = 32
102
+ from torchcrf import CRF
103
+ self.crf = CRF(odims, batch_first=True)
104
+ self.linear = torch.nn.Linear(n_chans, odims)
105
+
106
+ def _forward(self, xs, x_masks=None, is_inference=False):
107
+ xs = xs.transpose(1, -1) # (B, idim, Tmax)
108
+ for f in self.conv:
109
+ xs = f(xs) # (B, C, Tmax)
110
+ if x_masks is not None:
111
+ xs = xs * (1 - x_masks.float())[:, None, :]
112
+
113
+ xs = self.linear(xs.transpose(1, -1)) # [B, T, C]
114
+ xs = xs * (1 - x_masks.float())[:, :, None] # (B, T, C)
115
+ if is_inference:
116
+ return self.out2dur(xs), xs
117
+ else:
118
+ if hparams['dur_loss'] in ['mse']:
119
+ xs = xs.squeeze(-1) # (B, Tmax)
120
+ return xs
121
+
122
+ def out2dur(self, xs):
123
+ if hparams['dur_loss'] in ['mse']:
124
+ # NOTE: calculate in log domain
125
+ xs = xs.squeeze(-1) # (B, Tmax)
126
+ dur = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long() # avoid negative value
127
+ elif hparams['dur_loss'] == 'mog':
128
+ return NotImplementedError
129
+ elif hparams['dur_loss'] == 'crf':
130
+ dur = torch.LongTensor(self.crf.decode(xs)).cuda()
131
+ return dur
132
+
133
+ def forward(self, xs, x_masks=None):
134
+ """Calculate forward propagation.
135
+ Args:
136
+ xs (Tensor): Batch of input sequences (B, Tmax, idim).
137
+ x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
138
+ Returns:
139
+ Tensor: Batch of predicted durations in log domain (B, Tmax).
140
+ """
141
+ return self._forward(xs, x_masks, False)
142
+
143
+ def inference(self, xs, x_masks=None):
144
+ """Inference duration.
145
+ Args:
146
+ xs (Tensor): Batch of input sequences (B, Tmax, idim).
147
+ x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
148
+ Returns:
149
+ LongTensor: Batch of predicted durations in linear domain (B, Tmax).
150
+ """
151
+ return self._forward(xs, x_masks, True)
152
+
153
+
154
+ class LengthRegulator(torch.nn.Module):
155
+ def __init__(self, pad_value=0.0):
156
+ super(LengthRegulator, self).__init__()
157
+ self.pad_value = pad_value
158
+
159
+ def forward(self, dur, dur_padding=None, alpha=1.0):
160
+ """
161
+ Example (no batch dim version):
162
+ 1. dur = [2,2,3]
163
+ 2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4]
164
+ 3. token_mask = [[1,1,0,0,0,0,0],
165
+ [0,0,1,1,0,0,0],
166
+ [0,0,0,0,1,1,1]]
167
+ 4. token_idx * token_mask = [[1,1,0,0,0,0,0],
168
+ [0,0,2,2,0,0,0],
169
+ [0,0,0,0,3,3,3]]
170
+ 5. (token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3]
171
+
172
+ :param dur: Batch of durations of each frame (B, T_txt)
173
+ :param dur_padding: Batch of padding of each frame (B, T_txt)
174
+ :param alpha: duration rescale coefficient
175
+ :return:
176
+ mel2ph (B, T_speech)
177
+ """
178
+ assert alpha > 0
179
+ dur = torch.round(dur.float() * alpha).long()
180
+ if dur_padding is not None:
181
+ dur = dur * (1 - dur_padding.long())
182
+ token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device)
183
+ dur_cumsum = torch.cumsum(dur, 1)
184
+ dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0)
185
+
186
+ pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device)
187
+ token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None])
188
+ mel2ph = (token_idx * token_mask.long()).sum(1)
189
+ return mel2ph
190
+
191
+
192
+ class PitchPredictor(torch.nn.Module):
193
+ def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
194
+ dropout_rate=0.1, padding='SAME'):
195
+ """Initialize pitch predictor module.
196
+ Args:
197
+ idim (int): Input dimension.
198
+ n_layers (int, optional): Number of convolutional layers.
199
+ n_chans (int, optional): Number of channels of convolutional layers.
200
+ kernel_size (int, optional): Kernel size of convolutional layers.
201
+ dropout_rate (float, optional): Dropout rate.
202
+ """
203
+ super(PitchPredictor, self).__init__()
204
+ self.conv = torch.nn.ModuleList()
205
+ self.kernel_size = kernel_size
206
+ self.padding = padding
207
+ for idx in range(n_layers):
208
+ in_chans = idim if idx == 0 else n_chans
209
+ self.conv += [torch.nn.Sequential(
210
+ torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
211
+ if padding == 'SAME'
212
+ else (kernel_size - 1, 0), 0),
213
+ torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
214
+ torch.nn.ReLU(),
215
+ LayerNorm(n_chans, dim=1),
216
+ torch.nn.Dropout(dropout_rate)
217
+ )]
218
+ self.linear = torch.nn.Linear(n_chans, odim)
219
+ self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
220
+ self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
221
+
222
+ def forward(self, xs):
223
+ """
224
+
225
+ :param xs: [B, T, H]
226
+ :return: [B, T, H]
227
+ """
228
+ positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
229
+ xs = xs + positions
230
+ xs = xs.transpose(1, -1) # (B, idim, Tmax)
231
+ for f in self.conv:
232
+ xs = f(xs) # (B, C, Tmax)
233
+ # NOTE: calculate in log domain
234
+ xs = self.linear(xs.transpose(1, -1)) # (B, Tmax, H)
235
+ return xs
236
+
237
+
238
+ class EnergyPredictor(PitchPredictor):
239
+ pass
240
+
241
+
242
+ def mel2ph_to_dur(mel2ph, T_txt, max_dur=None):
243
+ B, _ = mel2ph.shape
244
+ dur = mel2ph.new_zeros(B, T_txt + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
245
+ dur = dur[:, 1:]
246
+ if max_dur is not None:
247
+ dur = dur.clamp(max=max_dur)
248
+ return dur
249
+
250
+
251
+ class FFTBlocks(nn.Module):
252
+ def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=None, num_heads=2,
253
+ use_pos_embed=True, use_last_norm=True, norm='ln', use_pos_embed_alpha=True):
254
+ super().__init__()
255
+ self.num_layers = num_layers
256
+ embed_dim = self.hidden_size = hidden_size
257
+ self.dropout = dropout if dropout is not None else hparams['dropout']
258
+ self.use_pos_embed = use_pos_embed
259
+ self.use_last_norm = use_last_norm
260
+ if use_pos_embed:
261
+ self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
262
+ self.padding_idx = 0
263
+ self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
264
+ self.embed_positions = SinusoidalPositionalEmbedding(
265
+ embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
266
+ )
267
+
268
+ self.layers = nn.ModuleList([])
269
+ self.layers.extend([
270
+ TransformerEncoderLayer(self.hidden_size, self.dropout,
271
+ kernel_size=ffn_kernel_size, num_heads=num_heads)
272
+ for _ in range(self.num_layers)
273
+ ])
274
+ if self.use_last_norm:
275
+ if norm == 'ln':
276
+ self.layer_norm = nn.LayerNorm(embed_dim)
277
+ elif norm == 'bn':
278
+ self.layer_norm = BatchNorm1dTBC(embed_dim)
279
+ else:
280
+ self.layer_norm = None
281
+
282
+ def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
283
+ """
284
+ :param x: [B, T, C]
285
+ :param padding_mask: [B, T]
286
+ :return: [B, T, C] or [L, B, T, C]
287
+ """
288
+ # padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
289
+ padding_mask = x.abs().sum(-1).eq(0).detach() if padding_mask is None else padding_mask
290
+ nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1]
291
+ if self.use_pos_embed:
292
+ positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
293
+ x = x + positions
294
+ x = F.dropout(x, p=self.dropout, training=self.training)
295
+ # B x T x C -> T x B x C
296
+ x = x.transpose(0, 1) * nonpadding_mask_TB
297
+ hiddens = []
298
+ for layer in self.layers:
299
+ x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
300
+ hiddens.append(x)
301
+ if self.use_last_norm:
302
+ x = self.layer_norm(x) * nonpadding_mask_TB
303
+ if return_hiddens:
304
+ x = torch.stack(hiddens, 0) # [L, T, B, C]
305
+ x = x.transpose(1, 2) # [L, B, T, C]
306
+ else:
307
+ x = x.transpose(0, 1) # [B, T, C]
308
+ return x
309
+
310
+
311
+ class FastspeechEncoder(FFTBlocks):
312
+ '''
313
+ compared to FFTBlocks:
314
+ - input is [B, T, H], not [B, T, C]
315
+ - supports "relative" positional encoding
316
+ '''
317
+ def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=2):
318
+ hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
319
+ kernel_size = hparams['enc_ffn_kernel_size'] if kernel_size is None else kernel_size
320
+ num_layers = hparams['dec_layers'] if num_layers is None else num_layers
321
+ super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
322
+ use_pos_embed=False) # use_pos_embed_alpha for compatibility
323
+ #self.embed_tokens = embed_tokens
324
+ self.embed_scale = math.sqrt(hidden_size)
325
+ self.padding_idx = 0
326
+ if hparams.get('rel_pos') is not None and hparams['rel_pos']:
327
+ self.embed_positions = RelPositionalEncoding(hidden_size, dropout_rate=0.0)
328
+ else:
329
+ self.embed_positions = SinusoidalPositionalEmbedding(
330
+ hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
331
+ )
332
+
333
+ def forward(self, hubert):
334
+ """
335
+
336
+ :param hubert: [B, T, H ]
337
+ :return: {
338
+ 'encoder_out': [T x B x C]
339
+ }
340
+ """
341
+ # encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
342
+ encoder_padding_mask = (hubert==0).all(-1)
343
+ x = self.forward_embedding(hubert) # [B, T, H]
344
+ x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask)
345
+ return x
346
+
347
+ def forward_embedding(self, hubert):
348
+ # embed tokens and positions
349
+ x = self.embed_scale * hubert
350
+ if hparams['use_pos_embed']:
351
+ positions = self.embed_positions(hubert)
352
+ x = x + positions
353
+ x = F.dropout(x, p=self.dropout, training=self.training)
354
+ return x
355
+
356
+
357
+ class FastspeechDecoder(FFTBlocks):
358
+ def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None):
359
+ num_heads = hparams['num_heads'] if num_heads is None else num_heads
360
+ hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
361
+ kernel_size = hparams['dec_ffn_kernel_size'] if kernel_size is None else kernel_size
362
+ num_layers = hparams['dec_layers'] if num_layers is None else num_layers
363
+ super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads)
364
+
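A worked example of LengthRegulator, mirroring its docstring: dur = [2, 2, 3] expands to mel2ph = [1, 1, 2, 2, 3, 3, 3] (token indices start at 1, 0 marks padding), and mel2ph_to_dur inverts the mapping (illustrative sketch; assumes the repository root is on PYTHONPATH):

    import torch
    from modules.fastspeech.tts_modules import LengthRegulator, mel2ph_to_dur

    dur = torch.tensor([[2, 2, 3]])
    mel2ph = LengthRegulator()(dur)
    print(mel2ph)                    # tensor([[1, 1, 2, 2, 3, 3, 3]])
    print(mel2ph_to_dur(mel2ph, 3))  # tensor([[2, 2, 3]])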
modules/hifigan/__pycache__/hifigan.cpython-38.pyc ADDED
Binary file (11.5 kB). View file
 
modules/hifigan/hifigan.py ADDED
@@ -0,0 +1,365 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import torch.nn as nn
4
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
5
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
6
+
7
+ from modules.parallel_wavegan.layers import UpsampleNetwork, ConvInUpsampleNetwork
8
+ from modules.parallel_wavegan.models.source import SourceModuleHnNSF
9
+ import numpy as np
10
+
11
+ LRELU_SLOPE = 0.1
12
+
13
+
14
+ def init_weights(m, mean=0.0, std=0.01):
15
+ classname = m.__class__.__name__
16
+ if classname.find("Conv") != -1:
17
+ m.weight.data.normal_(mean, std)
18
+
19
+
20
+ def apply_weight_norm(m):
21
+ classname = m.__class__.__name__
22
+ if classname.find("Conv") != -1:
23
+ weight_norm(m)
24
+
25
+
26
+ def get_padding(kernel_size, dilation=1):
27
+ return int((kernel_size * dilation - dilation) / 2)
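get_padding returns the padding that keeps a dilated convolution length-preserving: (kernel_size*dilation - dilation) / 2 = dilation*(kernel_size - 1)/2. For the kernel_size=3, dilation=(1, 3, 5) defaults used by ResBlock1 below this gives paddings 1, 3 and 5 (quick illustrative check):

    for d in (1, 3, 5):
        assert int((3 * d - d) / 2) == d * (3 - 1) // 2
        print(d, '->', int((3 * d - d) / 2))  # 1 -> 1, 3 -> 3, 5 -> 5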
28
+
29
+
30
+ class ResBlock1(torch.nn.Module):
31
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
32
+ super(ResBlock1, self).__init__()
33
+ self.h = h
34
+ self.convs1 = nn.ModuleList([
35
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
36
+ padding=get_padding(kernel_size, dilation[0]))),
37
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
38
+ padding=get_padding(kernel_size, dilation[1]))),
39
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
40
+ padding=get_padding(kernel_size, dilation[2])))
41
+ ])
42
+ self.convs1.apply(init_weights)
43
+
44
+ self.convs2 = nn.ModuleList([
45
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
46
+ padding=get_padding(kernel_size, 1))),
47
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
48
+ padding=get_padding(kernel_size, 1))),
49
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
50
+ padding=get_padding(kernel_size, 1)))
51
+ ])
52
+ self.convs2.apply(init_weights)
53
+
54
+ def forward(self, x):
55
+ for c1, c2 in zip(self.convs1, self.convs2):
56
+ xt = F.leaky_relu(x, LRELU_SLOPE)
57
+ xt = c1(xt)
58
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
59
+ xt = c2(xt)
60
+ x = xt + x
61
+ return x
62
+
63
+ def remove_weight_norm(self):
64
+ for l in self.convs1:
65
+ remove_weight_norm(l)
66
+ for l in self.convs2:
67
+ remove_weight_norm(l)
68
+
69
+
70
+ class ResBlock2(torch.nn.Module):
71
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
72
+ super(ResBlock2, self).__init__()
73
+ self.h = h
74
+ self.convs = nn.ModuleList([
75
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
76
+ padding=get_padding(kernel_size, dilation[0]))),
77
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
78
+ padding=get_padding(kernel_size, dilation[1])))
79
+ ])
80
+ self.convs.apply(init_weights)
81
+
82
+ def forward(self, x):
83
+ for c in self.convs:
84
+ xt = F.leaky_relu(x, LRELU_SLOPE)
85
+ xt = c(xt)
86
+ x = xt + x
87
+ return x
88
+
89
+ def remove_weight_norm(self):
90
+ for l in self.convs:
91
+ remove_weight_norm(l)
92
+
93
+
94
+ class Conv1d1x1(Conv1d):
95
+ """1x1 Conv1d with customized initialization."""
96
+
97
+ def __init__(self, in_channels, out_channels, bias):
98
+ """Initialize 1x1 Conv1d module."""
99
+ super(Conv1d1x1, self).__init__(in_channels, out_channels,
100
+ kernel_size=1, padding=0,
101
+ dilation=1, bias=bias)
102
+
103
+
104
+ class HifiGanGenerator(torch.nn.Module):
105
+ def __init__(self, h, c_out=1):
106
+ super(HifiGanGenerator, self).__init__()
107
+ self.h = h
108
+ self.num_kernels = len(h['resblock_kernel_sizes'])
109
+ self.num_upsamples = len(h['upsample_rates'])
110
+
111
+ if h['use_pitch_embed']:
112
+ self.harmonic_num = 8
113
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h['upsample_rates']))
114
+ self.m_source = SourceModuleHnNSF(
115
+ sampling_rate=h['audio_sample_rate'],
116
+ harmonic_num=self.harmonic_num)
117
+ self.noise_convs = nn.ModuleList()
118
+ self.conv_pre = weight_norm(Conv1d(80, h['upsample_initial_channel'], 7, 1, padding=3))
119
+ resblock = ResBlock1 if h['resblock'] == '1' else ResBlock2
120
+
121
+ self.ups = nn.ModuleList()
122
+ for i, (u, k) in enumerate(zip(h['upsample_rates'], h['upsample_kernel_sizes'])):
123
+ c_cur = h['upsample_initial_channel'] // (2 ** (i + 1))
124
+ self.ups.append(weight_norm(
125
+ ConvTranspose1d(c_cur * 2, c_cur, k, u, padding=(k - u) // 2)))
126
+ if h['use_pitch_embed']:
127
+ if i + 1 < len(h['upsample_rates']):
128
+ stride_f0 = np.prod(h['upsample_rates'][i + 1:])
129
+ self.noise_convs.append(Conv1d(
130
+ 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
131
+ else:
132
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
133
+
134
+ self.resblocks = nn.ModuleList()
135
+ for i in range(len(self.ups)):
136
+ ch = h['upsample_initial_channel'] // (2 ** (i + 1))
137
+ for j, (k, d) in enumerate(zip(h['resblock_kernel_sizes'], h['resblock_dilation_sizes'])):
138
+ self.resblocks.append(resblock(h, ch, k, d))
139
+
140
+ self.conv_post = weight_norm(Conv1d(ch, c_out, 7, 1, padding=3))
141
+ self.ups.apply(init_weights)
142
+ self.conv_post.apply(init_weights)
143
+
144
+ def forward(self, x, f0=None):
145
+ if f0 is not None:
146
+ # harmonic-source signal, noise-source signal, uv flag
147
+ f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)
148
+ har_source, noi_source, uv = self.m_source(f0)
149
+ har_source = har_source.transpose(1, 2)
150
+
151
+ x = self.conv_pre(x)
152
+ for i in range(self.num_upsamples):
153
+ x = F.leaky_relu(x, LRELU_SLOPE)
154
+ x = self.ups[i](x)
155
+ if f0 is not None:
156
+ x_source = self.noise_convs[i](har_source)
157
+ x = x + x_source
158
+ xs = None
159
+ for j in range(self.num_kernels):
160
+ if xs is None:
161
+ xs = self.resblocks[i * self.num_kernels + j](x)
162
+ else:
163
+ xs += self.resblocks[i * self.num_kernels + j](x)
164
+ x = xs / self.num_kernels
165
+ x = F.leaky_relu(x)
166
+ x = self.conv_post(x)
167
+ x = torch.tanh(x)
168
+
169
+ return x
170
+
171
+ def remove_weight_norm(self):
172
+ print('Removing weight norm...')
173
+ for l in self.ups:
174
+ remove_weight_norm(l)
175
+ for l in self.resblocks:
176
+ l.remove_weight_norm()
177
+ remove_weight_norm(self.conv_pre)
178
+ remove_weight_norm(self.conv_post)
179
+
180
+
181
+ class DiscriminatorP(torch.nn.Module):
182
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, use_cond=False, c_in=1):
183
+ super(DiscriminatorP, self).__init__()
184
+ self.use_cond = use_cond
185
+ if use_cond:
186
+ from utils.hparams import hparams
187
+ t = hparams['hop_size']
188
+ self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2)
189
+ c_in = 2
190
+
191
+ self.period = period
192
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
193
+ self.convs = nn.ModuleList([
194
+ norm_f(Conv2d(c_in, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
195
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
196
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
197
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
198
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
199
+ ])
200
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
201
+
202
+ def forward(self, x, mel):
203
+ fmap = []
204
+ if self.use_cond:
205
+ x_mel = self.cond_net(mel)
206
+ x = torch.cat([x_mel, x], 1)
207
+ # 1d to 2d
208
+ b, c, t = x.shape
209
+ if t % self.period != 0: # pad first
210
+ n_pad = self.period - (t % self.period)
211
+ x = F.pad(x, (0, n_pad), "reflect")
212
+ t = t + n_pad
213
+ x = x.view(b, c, t // self.period, self.period)
214
+
215
+ for l in self.convs:
216
+ x = l(x)
217
+ x = F.leaky_relu(x, LRELU_SLOPE)
218
+ fmap.append(x)
219
+ x = self.conv_post(x)
220
+ fmap.append(x)
221
+ x = torch.flatten(x, 1, -1)
222
+
223
+ return x, fmap
224
+
225
+
226
+ class MultiPeriodDiscriminator(torch.nn.Module):
227
+ def __init__(self, use_cond=False, c_in=1):
228
+ super(MultiPeriodDiscriminator, self).__init__()
229
+ self.discriminators = nn.ModuleList([
230
+ DiscriminatorP(2, use_cond=use_cond, c_in=c_in),
231
+ DiscriminatorP(3, use_cond=use_cond, c_in=c_in),
232
+ DiscriminatorP(5, use_cond=use_cond, c_in=c_in),
233
+ DiscriminatorP(7, use_cond=use_cond, c_in=c_in),
234
+ DiscriminatorP(11, use_cond=use_cond, c_in=c_in),
235
+ ])
236
+
237
+ def forward(self, y, y_hat, mel=None):
238
+ y_d_rs = []
239
+ y_d_gs = []
240
+ fmap_rs = []
241
+ fmap_gs = []
242
+ for i, d in enumerate(self.discriminators):
243
+ y_d_r, fmap_r = d(y, mel)
244
+ y_d_g, fmap_g = d(y_hat, mel)
245
+ y_d_rs.append(y_d_r)
246
+ fmap_rs.append(fmap_r)
247
+ y_d_gs.append(y_d_g)
248
+ fmap_gs.append(fmap_g)
249
+
250
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
251
+
252
+
253
+ class DiscriminatorS(torch.nn.Module):
254
+ def __init__(self, use_spectral_norm=False, use_cond=False, upsample_rates=None, c_in=1):
255
+ super(DiscriminatorS, self).__init__()
256
+ self.use_cond = use_cond
257
+ if use_cond:
258
+ t = np.prod(upsample_rates)
259
+ self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2)
260
+ c_in = 2
261
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
262
+ self.convs = nn.ModuleList([
263
+ norm_f(Conv1d(c_in, 128, 15, 1, padding=7)),
264
+ norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
265
+ norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
266
+ norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
267
+ norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
268
+ norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
269
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
270
+ ])
271
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
272
+
273
+ def forward(self, x, mel):
274
+ if self.use_cond:
275
+ x_mel = self.cond_net(mel)
276
+ x = torch.cat([x_mel, x], 1)
277
+ fmap = []
278
+ for l in self.convs:
279
+ x = l(x)
280
+ x = F.leaky_relu(x, LRELU_SLOPE)
281
+ fmap.append(x)
282
+ x = self.conv_post(x)
283
+ fmap.append(x)
284
+ x = torch.flatten(x, 1, -1)
285
+
286
+ return x, fmap
287
+
288
+
289
+ class MultiScaleDiscriminator(torch.nn.Module):
290
+ def __init__(self, use_cond=False, c_in=1):
291
+ super(MultiScaleDiscriminator, self).__init__()
292
+ from utils.hparams import hparams
293
+ self.discriminators = nn.ModuleList([
294
+ DiscriminatorS(use_spectral_norm=True, use_cond=use_cond,
295
+ upsample_rates=[4, 4, hparams['hop_size'] // 16],
296
+ c_in=c_in),
297
+ DiscriminatorS(use_cond=use_cond,
298
+ upsample_rates=[4, 4, hparams['hop_size'] // 32],
299
+ c_in=c_in),
300
+ DiscriminatorS(use_cond=use_cond,
301
+ upsample_rates=[4, 4, hparams['hop_size'] // 64],
302
+ c_in=c_in),
303
+ ])
304
+ self.meanpools = nn.ModuleList([
305
+ AvgPool1d(4, 2, padding=1),
306
+ AvgPool1d(4, 2, padding=1)
307
+ ])
308
+
309
+ def forward(self, y, y_hat, mel=None):
310
+ y_d_rs = []
311
+ y_d_gs = []
312
+ fmap_rs = []
313
+ fmap_gs = []
314
+ for i, d in enumerate(self.discriminators):
315
+ if i != 0:
316
+ y = self.meanpools[i - 1](y)
317
+ y_hat = self.meanpools[i - 1](y_hat)
318
+ y_d_r, fmap_r = d(y, mel)
319
+ y_d_g, fmap_g = d(y_hat, mel)
320
+ y_d_rs.append(y_d_r)
321
+ fmap_rs.append(fmap_r)
322
+ y_d_gs.append(y_d_g)
323
+ fmap_gs.append(fmap_g)
324
+
325
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
326
+
327
+
328
+ def feature_loss(fmap_r, fmap_g):
329
+ loss = 0
330
+ for dr, dg in zip(fmap_r, fmap_g):
331
+ for rl, gl in zip(dr, dg):
332
+ loss += torch.mean(torch.abs(rl - gl))
333
+
334
+ return loss * 2
335
+
336
+
337
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
338
+ r_losses = 0
339
+ g_losses = 0
340
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
341
+ r_loss = torch.mean((1 - dr) ** 2)
342
+ g_loss = torch.mean(dg ** 2)
343
+ r_losses += r_loss
344
+ g_losses += g_loss
345
+ r_losses = r_losses / len(disc_real_outputs)
346
+ g_losses = g_losses / len(disc_real_outputs)
347
+ return r_losses, g_losses
348
+
349
+
350
+ def cond_discriminator_loss(outputs):
351
+ loss = 0
352
+ for dg in outputs:
353
+ g_loss = torch.mean(dg ** 2)
354
+ loss += g_loss
355
+ loss = loss / len(outputs)
356
+ return loss
357
+
358
+
359
+ def generator_loss(disc_outputs):
360
+ loss = 0
361
+ for dg in disc_outputs:
362
+ l = torch.mean((1 - dg) ** 2)
363
+ loss += l
364
+ loss = loss / len(disc_outputs)
365
+ return loss
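A minimal usage sketch for the HifiGanGenerator defined above (illustrative only: the dict keys mirror the h['...'] lookups in the class, and the values are placeholders rather than this repository's shipped config):

```python
# Hedged example -- config values are placeholders, not the project's actual settings.
import torch
from modules.hifigan.hifigan import HifiGanGenerator

h = {
    'resblock': '1',
    'resblock_kernel_sizes': [3, 7, 11],
    'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    'upsample_rates': [8, 8, 2, 2],
    'upsample_kernel_sizes': [16, 16, 4, 4],
    'upsample_initial_channel': 512,
    'use_pitch_embed': False,   # True would also require an f0 track in forward()
}
vocoder = HifiGanGenerator(h)
mel = torch.randn(1, 80, 100)        # (B, n_mels=80, T_frames)
with torch.no_grad():
    wav = vocoder(mel)               # (B, 1, T_frames * prod(upsample_rates))
```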
modules/hifigan/mel_utils.py ADDED
@@ -0,0 +1,80 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.utils.data
4
+ from librosa.filters import mel as librosa_mel_fn
5
+ from scipy.io.wavfile import read
6
+
7
+ MAX_WAV_VALUE = 32768.0
8
+
9
+
10
+ def load_wav(full_path):
11
+ sampling_rate, data = read(full_path)
12
+ return data, sampling_rate
13
+
14
+
15
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
16
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
17
+
18
+
19
+ def dynamic_range_decompression(x, C=1):
20
+ return np.exp(x) / C
21
+
22
+
23
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
24
+ return torch.log(torch.clamp(x, min=clip_val) * C)
25
+
26
+
27
+ def dynamic_range_decompression_torch(x, C=1):
28
+ return torch.exp(x) / C
29
+
30
+
31
+ def spectral_normalize_torch(magnitudes):
32
+ output = dynamic_range_compression_torch(magnitudes)
33
+ return output
34
+
35
+
36
+ def spectral_de_normalize_torch(magnitudes):
37
+ output = dynamic_range_decompression_torch(magnitudes)
38
+ return output
39
+
40
+
41
+ mel_basis = {}
42
+ hann_window = {}
43
+
44
+
45
+ def mel_spectrogram(y, hparams, center=False, complex=False):
46
+ # hop_size: 512 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)
47
+ # win_size: 2048 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate)
48
+ # fmin: 55 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
49
+ # fmax: 10000 # To be increased/reduced depending on data.
50
+ # fft_size: 2048 # Extra window size is filled with 0 paddings to match this parameter
51
+ # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax,
52
+ n_fft = hparams['fft_size']
53
+ num_mels = hparams['audio_num_mel_bins']
54
+ sampling_rate = hparams['audio_sample_rate']
55
+ hop_size = hparams['hop_size']
56
+ win_size = hparams['win_size']
57
+ fmin = hparams['fmin']
58
+ fmax = hparams['fmax']
59
+ y = y.clamp(min=-1., max=1.)
60
+ global mel_basis, hann_window
61
+ if str(fmax) + '_' + str(y.device) not in mel_basis:
62
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
63
+ mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
64
+ hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
65
+
66
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
67
+ mode='reflect')
68
+ y = y.squeeze(1)
69
+
70
+ spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
71
+ center=center, pad_mode='reflect', normalized=False, onesided=True)
72
+
73
+ if not complex:
74
+ spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
75
+ spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec)
76
+ spec = spectral_normalize_torch(spec)
77
+ else:
78
+ B, C, T, _ = spec.shape
79
+ spec = spec.transpose(1, 2) # [B, T, n_fft, 2]
80
+ return spec
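A small sketch of calling mel_spectrogram(); the hparams keys match the lookups inside the function, and the values below are placeholders chosen only so the shapes line up:

```python
# Hedged example -- hparams values are illustrative, not this repository's config.
import torch
from modules.hifigan.mel_utils import mel_spectrogram

hparams = {
    'fft_size': 2048,
    'audio_num_mel_bins': 80,
    'audio_sample_rate': 44100,
    'hop_size': 512,
    'win_size': 2048,
    'fmin': 40,
    'fmax': 16000,
}
wav = torch.randn(1, 44100)               # (B, n_samples), roughly one second of audio
mel = mel_spectrogram(wav, hparams)       # (B, 80, n_frames), log-compressed mel
```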
modules/nsf_hifigan/__pycache__/env.cpython-38.pyc ADDED
Binary file (793 Bytes).
 
modules/nsf_hifigan/__pycache__/models.cpython-38.pyc ADDED
Binary file (16.3 kB).
 
modules/nsf_hifigan/__pycache__/nvSTFT.cpython-38.pyc ADDED
Binary file (3.93 kB).
 
modules/nsf_hifigan/__pycache__/utils.cpython-38.pyc ADDED
Binary file (2.34 kB).
 
modules/nsf_hifigan/env.py ADDED
@@ -0,0 +1,15 @@
1
+ import os
2
+ import shutil
3
+
4
+
5
+ class AttrDict(dict):
6
+ def __init__(self, *args, **kwargs):
7
+ super(AttrDict, self).__init__(*args, **kwargs)
8
+ self.__dict__ = self
9
+
10
+
11
+ def build_env(config, config_name, path):
12
+ t_path = os.path.join(path, config_name)
13
+ if config != t_path:
14
+ os.makedirs(path, exist_ok=True)
15
+ shutil.copyfile(config, os.path.join(path, config_name))
modules/nsf_hifigan/models.py ADDED
@@ -0,0 +1,549 @@
1
+ import os
2
+ import json
3
+ from .env import AttrDict
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ import torch.nn as nn
8
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
9
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
10
+ from .utils import init_weights, get_padding
11
+
12
+ LRELU_SLOPE = 0.1
13
+
14
+ def load_model(model_path, device='cuda'):
15
+ config_file = os.path.join(os.path.split(model_path)[0], 'config.json')
16
+ with open(config_file) as f:
17
+ data = f.read()
18
+
19
+ global h
20
+ json_config = json.loads(data)
21
+ h = AttrDict(json_config)
22
+
23
+ generator = Generator(h).to(device)
24
+
25
+ cp_dict = torch.load(model_path)
26
+ generator.load_state_dict(cp_dict['generator'])
27
+ generator.eval()
28
+ generator.remove_weight_norm()
29
+ del cp_dict
30
+ return generator, h
31
+
32
+
33
+ class ResBlock1(torch.nn.Module):
34
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
35
+ super(ResBlock1, self).__init__()
36
+ self.h = h
37
+ self.convs1 = nn.ModuleList([
38
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
39
+ padding=get_padding(kernel_size, dilation[0]))),
40
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
41
+ padding=get_padding(kernel_size, dilation[1]))),
42
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
43
+ padding=get_padding(kernel_size, dilation[2])))
44
+ ])
45
+ self.convs1.apply(init_weights)
46
+
47
+ self.convs2 = nn.ModuleList([
48
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
49
+ padding=get_padding(kernel_size, 1))),
50
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
51
+ padding=get_padding(kernel_size, 1))),
52
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
53
+ padding=get_padding(kernel_size, 1)))
54
+ ])
55
+ self.convs2.apply(init_weights)
56
+
57
+ def forward(self, x):
58
+ for c1, c2 in zip(self.convs1, self.convs2):
59
+ xt = F.leaky_relu(x, LRELU_SLOPE)
60
+ xt = c1(xt)
61
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
62
+ xt = c2(xt)
63
+ x = xt + x
64
+ return x
65
+
66
+ def remove_weight_norm(self):
67
+ for l in self.convs1:
68
+ remove_weight_norm(l)
69
+ for l in self.convs2:
70
+ remove_weight_norm(l)
71
+
72
+
73
+ class ResBlock2(torch.nn.Module):
74
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
75
+ super(ResBlock2, self).__init__()
76
+ self.h = h
77
+ self.convs = nn.ModuleList([
78
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
79
+ padding=get_padding(kernel_size, dilation[0]))),
80
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
81
+ padding=get_padding(kernel_size, dilation[1])))
82
+ ])
83
+ self.convs.apply(init_weights)
84
+
85
+ def forward(self, x):
86
+ for c in self.convs:
87
+ xt = F.leaky_relu(x, LRELU_SLOPE)
88
+ xt = c(xt)
89
+ x = xt + x
90
+ return x
91
+
92
+ def remove_weight_norm(self):
93
+ for l in self.convs:
94
+ remove_weight_norm(l)
95
+
96
+
97
+ class Generator(torch.nn.Module):
98
+ def __init__(self, h):
99
+ super(Generator, self).__init__()
100
+ self.h = h
101
+ self.num_kernels = len(h.resblock_kernel_sizes)
102
+ self.num_upsamples = len(h.upsample_rates)
103
+ self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
104
+ resblock = ResBlock1 if h.resblock == '1' else ResBlock2
105
+
106
+ self.ups = nn.ModuleList()
107
+ for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
108
+ self.ups.append(weight_norm(
109
+ ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
110
+ k, u, padding=(k-u)//2)))
111
+
112
+ self.resblocks = nn.ModuleList()
113
+ for i in range(len(self.ups)):
114
+ ch = h.upsample_initial_channel//(2**(i+1))
115
+ for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
116
+ self.resblocks.append(resblock(h, ch, k, d))
117
+
118
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
119
+ self.ups.apply(init_weights)
120
+ self.conv_post.apply(init_weights)
121
+
122
+ def forward(self, x):
123
+ x = self.conv_pre(x)
124
+ for i in range(self.num_upsamples):
125
+ x = F.leaky_relu(x, LRELU_SLOPE)
126
+ x = self.ups[i](x)
127
+ xs = None
128
+ for j in range(self.num_kernels):
129
+ if xs is None:
130
+ xs = self.resblocks[i*self.num_kernels+j](x)
131
+ else:
132
+ xs += self.resblocks[i*self.num_kernels+j](x)
133
+ x = xs / self.num_kernels
134
+ x = F.leaky_relu(x)
135
+ x = self.conv_post(x)
136
+ x = torch.tanh(x)
137
+
138
+ return x
139
+
140
+ def remove_weight_norm(self):
141
+ print('Removing weight norm...')
142
+ for l in self.ups:
143
+ remove_weight_norm(l)
144
+ for l in self.resblocks:
145
+ l.remove_weight_norm()
146
+ remove_weight_norm(self.conv_pre)
147
+ remove_weight_norm(self.conv_post)
148
+ class SineGen(torch.nn.Module):
149
+ """ Definition of sine generator
150
+ SineGen(samp_rate, harmonic_num = 0,
151
+ sine_amp = 0.1, noise_std = 0.003,
152
+ voiced_threshold = 0,
153
+ flag_for_pulse=False)
154
+ samp_rate: sampling rate in Hz
155
+ harmonic_num: number of harmonic overtones (default 0)
156
+ sine_amp: amplitude of sine waveform (default 0.1)
157
+ noise_std: std of Gaussian noise (default 0.003)
158
+ voiced_threshold: F0 threshold for U/V classification (default 0)
159
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
160
+ Note: when flag_for_pulse is True, the first time step of a voiced
161
+ segment is always sin(np.pi) or cos(0)
162
+ """
163
+
164
+ def __init__(self, samp_rate, harmonic_num=0,
165
+ sine_amp=0.1, noise_std=0.003,
166
+ voiced_threshold=0,
167
+ flag_for_pulse=False):
168
+ super(SineGen, self).__init__()
169
+ self.sine_amp = sine_amp
170
+ self.noise_std = noise_std
171
+ self.harmonic_num = harmonic_num
172
+ self.dim = self.harmonic_num + 1
173
+ self.sampling_rate = samp_rate
174
+ self.voiced_threshold = voiced_threshold
175
+ self.flag_for_pulse = flag_for_pulse
176
+
177
+ def _f02uv(self, f0):
178
+ # generate uv signal
179
+ uv = torch.ones_like(f0)
180
+ uv = uv * (f0 > self.voiced_threshold)
181
+ return uv
182
+
183
+ def _f02sine(self, f0_values):
184
+ """ f0_values: (batchsize, length, dim)
185
+ where dim indicates fundamental tone and overtones
186
+ """
187
+ # convert to F0 in rad. The integer part n can be ignored
188
+ # because 2 * np.pi * n doesn't affect phase
189
+ rad_values = (f0_values / self.sampling_rate) % 1
190
+
191
+ # initial phase noise (no noise for fundamental component)
192
+ rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
193
+ device=f0_values.device)
194
+ rand_ini[:, 0] = 0
195
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
196
+
197
+ # instantaneous phase: sine[t] = sin(2*pi * \sum_{i=1}^{t} rad)
198
+ if not self.flag_for_pulse:
199
+ # for normal case
200
+
201
+ # To prevent torch.cumsum numerical overflow,
202
+ # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
203
+ # Buffer tmp_over_one_idx indicates the time step to add -1.
204
+ # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
205
+ tmp_over_one = torch.cumsum(rad_values, 1) % 1
206
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
207
+ tmp_over_one[:, :-1, :]) < 0
208
+ cumsum_shift = torch.zeros_like(rad_values)
209
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
210
+
211
+ sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
212
+ * 2 * np.pi)
213
+ else:
214
+ # If necessary, make sure that the first time step of every
215
+ # voiced segments is sin(pi) or cos(0)
216
+ # This is used for pulse-train generation
217
+
218
+ # identify the last time step in unvoiced segments
219
+ uv = self._f02uv(f0_values)
220
+ uv_1 = torch.roll(uv, shifts=-1, dims=1)
221
+ uv_1[:, -1, :] = 1
222
+ u_loc = (uv < 1) * (uv_1 > 0)
223
+
224
+ # get the instantaneous phase
225
+ tmp_cumsum = torch.cumsum(rad_values, dim=1)
226
+ # different batch needs to be processed differently
227
+ for idx in range(f0_values.shape[0]):
228
+ temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
229
+ temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
230
+ # stores the accumulation of i.phase within
231
+ # each voiced segments
232
+ tmp_cumsum[idx, :, :] = 0
233
+ tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
234
+
235
+ # rad_values - tmp_cumsum: remove the accumulation of i.phase
236
+ # within the previous voiced segment.
237
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
238
+
239
+ # get the sines
240
+ sines = torch.cos(i_phase * 2 * np.pi)
241
+ return sines
242
+
243
+ def forward(self, f0):
244
+ """ sine_tensor, uv = forward(f0)
245
+ input F0: tensor(batchsize=1, length, dim=1)
246
+ f0 for unvoiced steps should be 0
247
+ output sine_tensor: tensor(batchsize=1, length, dim)
248
+ output uv: tensor(batchsize=1, length, 1)
249
+ """
250
+ with torch.no_grad():
251
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
252
+ device=f0.device)
253
+ # fundamental component
254
+ f0_buf[:, :, 0] = f0[:, :, 0]
255
+ for idx in np.arange(self.harmonic_num):
256
+ # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
257
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
258
+
259
+ # generate sine waveforms
260
+ sine_waves = self._f02sine(f0_buf) * self.sine_amp
261
+
262
+ # generate uv signal
263
+ # uv = torch.ones(f0.shape)
264
+ # uv = uv * (f0 > self.voiced_threshold)
265
+ uv = self._f02uv(f0)
266
+
267
+ # noise: for unvoiced should be similar to sine_amp
268
+ # std = self.sine_amp/3 -> max value ~ self.sine_amp
269
+ # . for voiced regions is self.noise_std
270
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
271
+ noise = noise_amp * torch.randn_like(sine_waves)
272
+
273
+ # first: set the unvoiced part to 0 by uv
274
+ # then: additive noise
275
+ sine_waves = sine_waves * uv + noise
276
+ return sine_waves, uv, noise
277
+ class SourceModuleHnNSF(torch.nn.Module):
278
+ """ SourceModule for hn-nsf
279
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
280
+ add_noise_std=0.003, voiced_threshod=0)
281
+ sampling_rate: sampling_rate in Hz
282
+ harmonic_num: number of harmonic above F0 (default: 0)
283
+ sine_amp: amplitude of sine source signal (default: 0.1)
284
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
285
+ note that amplitude of noise in unvoiced is decided
286
+ by sine_amp
287
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
288
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
289
+ F0_sampled (batchsize, length, 1)
290
+ Sine_source (batchsize, length, 1)
291
+ noise_source (batchsize, length 1)
292
+ uv (batchsize, length, 1)
293
+ """
294
+
295
+ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
296
+ add_noise_std=0.003, voiced_threshod=0):
297
+ super(SourceModuleHnNSF, self).__init__()
298
+
299
+ self.sine_amp = sine_amp
300
+ self.noise_std = add_noise_std
301
+
302
+ # to produce sine waveforms
303
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
304
+ sine_amp, add_noise_std, voiced_threshod)
305
+
306
+ # to merge source harmonics into a single excitation
307
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
308
+ self.l_tanh = torch.nn.Tanh()
309
+
310
+ def forward(self, x):
311
+ """
312
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
313
+ F0_sampled (batchsize, length, 1)
314
+ Sine_source (batchsize, length, 1)
315
+ noise_source (batchsize, length 1)
316
+ """
317
+ # source for harmonic branch
318
+ sine_wavs, uv, _ = self.l_sin_gen(x)
319
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
320
+
321
+ # source for noise branch, in the same shape as uv
322
+ noise = torch.randn_like(uv) * self.sine_amp / 3
323
+ return sine_merge, noise, uv
324
+
325
+ class Generator(torch.nn.Module):
326
+ def __init__(self, h):
327
+ super(Generator, self).__init__()
328
+ self.h = h
329
+ self.num_kernels = len(h.resblock_kernel_sizes)
330
+ self.num_upsamples = len(h.upsample_rates)
331
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h.upsample_rates))
332
+ self.m_source = SourceModuleHnNSF(
333
+ sampling_rate=h.sampling_rate,
334
+ harmonic_num=8)
335
+ self.noise_convs = nn.ModuleList()
336
+ self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
337
+ resblock = ResBlock1 if h.resblock == '1' else ResBlock2
338
+
339
+ self.ups = nn.ModuleList()
340
+ for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
341
+ c_cur = h.upsample_initial_channel // (2 ** (i + 1))
342
+ self.ups.append(weight_norm(
343
+ ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
344
+ k, u, padding=(k-u)//2)))
345
+ if i + 1 < len(h.upsample_rates):
346
+ stride_f0 = np.prod(h.upsample_rates[i + 1:])
347
+ self.noise_convs.append(Conv1d(
348
+ 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
349
+ else:
350
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
351
+ self.resblocks = nn.ModuleList()
352
+ for i in range(len(self.ups)):
353
+ ch = h.upsample_initial_channel//(2**(i+1))
354
+ for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
355
+ self.resblocks.append(resblock(h, ch, k, d))
356
+
357
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
358
+ self.ups.apply(init_weights)
359
+ self.conv_post.apply(init_weights)
360
+
361
+ def forward(self, x, f0):
362
+ # print(1,x.shape,f0.shape,f0[:, None].shape)
363
+ f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # upsample frame-level F0 to sample level: (B, T', 1)
364
+ # print(2,f0.shape)
365
+ har_source, noi_source, uv = self.m_source(f0)
366
+ har_source = har_source.transpose(1, 2)
367
+ x = self.conv_pre(x)
368
+ # print(124,x.shape,har_source.shape)
369
+ for i in range(self.num_upsamples):
370
+ x = F.leaky_relu(x, LRELU_SLOPE)
371
+ # print(3,x.shape)
372
+ x = self.ups[i](x)
373
+ x_source = self.noise_convs[i](har_source)
374
+ # print(4,x_source.shape,har_source.shape,x.shape)
375
+ x = x + x_source
376
+ xs = None
377
+ for j in range(self.num_kernels):
378
+ if xs is None:
379
+ xs = self.resblocks[i*self.num_kernels+j](x)
380
+ else:
381
+ xs += self.resblocks[i*self.num_kernels+j](x)
382
+ x = xs / self.num_kernels
383
+ x = F.leaky_relu(x)
384
+ x = self.conv_post(x)
385
+ x = torch.tanh(x)
386
+
387
+ return x
388
+
389
+ def remove_weight_norm(self):
390
+ print('Removing weight norm...')
391
+ for l in self.ups:
392
+ remove_weight_norm(l)
393
+ for l in self.resblocks:
394
+ l.remove_weight_norm()
395
+ remove_weight_norm(self.conv_pre)
396
+ remove_weight_norm(self.conv_post)
397
+
398
+ class DiscriminatorP(torch.nn.Module):
399
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
400
+ super(DiscriminatorP, self).__init__()
401
+ self.period = period
402
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
403
+ self.convs = nn.ModuleList([
404
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
405
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
406
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
407
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
408
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
409
+ ])
410
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
411
+
412
+ def forward(self, x):
413
+ fmap = []
414
+
415
+ # 1d to 2d
416
+ b, c, t = x.shape
417
+ if t % self.period != 0: # pad first
418
+ n_pad = self.period - (t % self.period)
419
+ x = F.pad(x, (0, n_pad), "reflect")
420
+ t = t + n_pad
421
+ x = x.view(b, c, t // self.period, self.period)
422
+
423
+ for l in self.convs:
424
+ x = l(x)
425
+ x = F.leaky_relu(x, LRELU_SLOPE)
426
+ fmap.append(x)
427
+ x = self.conv_post(x)
428
+ fmap.append(x)
429
+ x = torch.flatten(x, 1, -1)
430
+
431
+ return x, fmap
432
+
433
+
434
+ class MultiPeriodDiscriminator(torch.nn.Module):
435
+ def __init__(self, periods=None):
436
+ super(MultiPeriodDiscriminator, self).__init__()
437
+ self.periods = periods if periods is not None else [2, 3, 5, 7, 11]
438
+ self.discriminators = nn.ModuleList()
439
+ for period in self.periods:
440
+ self.discriminators.append(DiscriminatorP(period))
441
+
442
+ def forward(self, y, y_hat):
443
+ y_d_rs = []
444
+ y_d_gs = []
445
+ fmap_rs = []
446
+ fmap_gs = []
447
+ for i, d in enumerate(self.discriminators):
448
+ y_d_r, fmap_r = d(y)
449
+ y_d_g, fmap_g = d(y_hat)
450
+ y_d_rs.append(y_d_r)
451
+ fmap_rs.append(fmap_r)
452
+ y_d_gs.append(y_d_g)
453
+ fmap_gs.append(fmap_g)
454
+
455
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
456
+
457
+
458
+ class DiscriminatorS(torch.nn.Module):
459
+ def __init__(self, use_spectral_norm=False):
460
+ super(DiscriminatorS, self).__init__()
461
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
462
+ self.convs = nn.ModuleList([
463
+ norm_f(Conv1d(1, 128, 15, 1, padding=7)),
464
+ norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
465
+ norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
466
+ norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
467
+ norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
468
+ norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
469
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
470
+ ])
471
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
472
+
473
+ def forward(self, x):
474
+ fmap = []
475
+ for l in self.convs:
476
+ x = l(x)
477
+ x = F.leaky_relu(x, LRELU_SLOPE)
478
+ fmap.append(x)
479
+ x = self.conv_post(x)
480
+ fmap.append(x)
481
+ x = torch.flatten(x, 1, -1)
482
+
483
+ return x, fmap
484
+
485
+
486
+ class MultiScaleDiscriminator(torch.nn.Module):
487
+ def __init__(self):
488
+ super(MultiScaleDiscriminator, self).__init__()
489
+ self.discriminators = nn.ModuleList([
490
+ DiscriminatorS(use_spectral_norm=True),
491
+ DiscriminatorS(),
492
+ DiscriminatorS(),
493
+ ])
494
+ self.meanpools = nn.ModuleList([
495
+ AvgPool1d(4, 2, padding=2),
496
+ AvgPool1d(4, 2, padding=2)
497
+ ])
498
+
499
+ def forward(self, y, y_hat):
500
+ y_d_rs = []
501
+ y_d_gs = []
502
+ fmap_rs = []
503
+ fmap_gs = []
504
+ for i, d in enumerate(self.discriminators):
505
+ if i != 0:
506
+ y = self.meanpools[i-1](y)
507
+ y_hat = self.meanpools[i-1](y_hat)
508
+ y_d_r, fmap_r = d(y)
509
+ y_d_g, fmap_g = d(y_hat)
510
+ y_d_rs.append(y_d_r)
511
+ fmap_rs.append(fmap_r)
512
+ y_d_gs.append(y_d_g)
513
+ fmap_gs.append(fmap_g)
514
+
515
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
516
+
517
+
518
+ def feature_loss(fmap_r, fmap_g):
519
+ loss = 0
520
+ for dr, dg in zip(fmap_r, fmap_g):
521
+ for rl, gl in zip(dr, dg):
522
+ loss += torch.mean(torch.abs(rl - gl))
523
+
524
+ return loss*2
525
+
526
+
527
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
528
+ loss = 0
529
+ r_losses = []
530
+ g_losses = []
531
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
532
+ r_loss = torch.mean((1-dr)**2)
533
+ g_loss = torch.mean(dg**2)
534
+ loss += (r_loss + g_loss)
535
+ r_losses.append(r_loss.item())
536
+ g_losses.append(g_loss.item())
537
+
538
+ return loss, r_losses, g_losses
539
+
540
+
541
+ def generator_loss(disc_outputs):
542
+ loss = 0
543
+ gen_losses = []
544
+ for dg in disc_outputs:
545
+ l = torch.mean((1-dg)**2)
546
+ gen_losses.append(l)
547
+ loss += l
548
+
549
+ return loss, gen_losses
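A minimal inference sketch for the NSF generator above; load_model() expects a config.json next to the weight file, and the checkpoint path here is a placeholder:

```python
# Hedged example -- the checkpoint path is hypothetical.
import torch
from modules.nsf_hifigan.models import load_model

vocoder, h = load_model('checkpoints/nsf_hifigan/model', device='cuda')
mel = torch.randn(1, h.num_mels, 100, device='cuda')   # (B, n_mels, T_frames)
f0 = torch.full((1, 100), 220.0, device='cuda')        # frame-level F0 in Hz (0 = unvoiced)
with torch.no_grad():
    wav = vocoder(mel, f0)                              # (B, 1, T_frames * prod(upsample_rates))
```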
modules/nsf_hifigan/nvSTFT.py ADDED
@@ -0,0 +1,111 @@
1
+ import math
2
+ import os
3
+ os.environ["LRU_CACHE_CAPACITY"] = "3"
4
+ import random
5
+ import torch
6
+ import torch.utils.data
7
+ import numpy as np
8
+ import librosa
9
+ from librosa.util import normalize
10
+ from librosa.filters import mel as librosa_mel_fn
11
+ from scipy.io.wavfile import read
12
+ import soundfile as sf
13
+
14
+ def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
15
+ sampling_rate = None
16
+ try:
17
+ data, sampling_rate = sf.read(full_path, always_2d=True)  # load with soundfile
18
+ except Exception as ex:
19
+ print(f"'{full_path}' failed to load.\nException:")
20
+ print(ex)
21
+ if return_empty_on_exception:
22
+ return [], sampling_rate or target_sr or 48000
23
+ else:
24
+ raise Exception(ex)
25
+
26
+ if len(data.shape) > 1:
27
+ data = data[:, 0]
28
+ assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
29
+
30
+ if np.issubdtype(data.dtype, np.integer): # if audio data is type int
31
+ max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
32
+ else: # if audio data is type fp32
33
+ max_mag = max(np.amax(data), -np.amin(data))
34
+ max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
35
+
36
+ data = torch.FloatTensor(data.astype(np.float32))/max_mag
37
+
38
+ if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
39
+ return [], sampling_rate or target_sr or 48000
40
+ if target_sr is not None and sampling_rate != target_sr:
41
+ data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
42
+ sampling_rate = target_sr
43
+
44
+ return data, sampling_rate
45
+
46
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
47
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
48
+
49
+ def dynamic_range_decompression(x, C=1):
50
+ return np.exp(x) / C
51
+
52
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
53
+ return torch.log(torch.clamp(x, min=clip_val) * C)
54
+
55
+ def dynamic_range_decompression_torch(x, C=1):
56
+ return torch.exp(x) / C
57
+
58
+ class STFT():
59
+ def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
60
+ self.target_sr = sr
61
+
62
+ self.n_mels = n_mels
63
+ self.n_fft = n_fft
64
+ self.win_size = win_size
65
+ self.hop_length = hop_length
66
+ self.fmin = fmin
67
+ self.fmax = fmax
68
+ self.clip_val = clip_val
69
+ self.mel_basis = {}
70
+ self.hann_window = {}
71
+
72
+ def get_mel(self, y, center=False):
73
+ sampling_rate = self.target_sr
74
+ n_mels = self.n_mels
75
+ n_fft = self.n_fft
76
+ win_size = self.win_size
77
+ hop_length = self.hop_length
78
+ fmin = self.fmin
79
+ fmax = self.fmax
80
+ clip_val = self.clip_val
81
+
82
+ if torch.min(y) < -1.:
83
+ print('min value is ', torch.min(y))
84
+ if torch.max(y) > 1.:
85
+ print('max value is ', torch.max(y))
86
+
87
+ if str(fmax) + '_' + str(y.device) not in self.mel_basis:
88
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
89
+ self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
90
+ self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)
91
+
92
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
93
+ y = y.squeeze(1)
94
+
95
+ spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
96
+ center=center, pad_mode='reflect', normalized=False, onesided=True)
97
+ # print(111,spec)
98
+ spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
99
+ # print(222,spec)
100
+ spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec)
101
+ # print(333,spec)
102
+ spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
103
+ # print(444,spec)
104
+ return spec
105
+
106
+ def __call__(self, audiopath):
107
+ audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
108
+ spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
109
+ return spect
110
+
111
+ stft = STFT()
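The module-level stft helper above can be called directly on a wav path to obtain a log-mel (the file name is a placeholder; the defaults resample to 22050 Hz with 80 mel bins):

```python
# Hedged example -- 'example.wav' is a placeholder path.
from modules.nsf_hifigan.nvSTFT import STFT, stft

mel = stft('example.wav')     # (n_mels, n_frames) log-mel at the default 22050 Hz settings
# For other audio settings, build a dedicated instance:
stft_44k = STFT(sr=44100, n_mels=128, n_fft=2048, win_size=2048,
                hop_length=512, fmin=40, fmax=16000)
```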
modules/nsf_hifigan/utils.py ADDED
@@ -0,0 +1,67 @@
1
+ import glob
2
+ import os
3
+ import matplotlib
4
+ import torch
5
+ from torch.nn.utils import weight_norm
6
+ matplotlib.use("Agg")
7
+ import matplotlib.pylab as plt
8
+
9
+
10
+ def plot_spectrogram(spectrogram):
11
+ fig, ax = plt.subplots(figsize=(10, 2))
12
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
13
+ interpolation='none')
14
+ plt.colorbar(im, ax=ax)
15
+
16
+ fig.canvas.draw()
17
+ plt.close()
18
+
19
+ return fig
20
+
21
+
22
+ def init_weights(m, mean=0.0, std=0.01):
23
+ classname = m.__class__.__name__
24
+ if classname.find("Conv") != -1:
25
+ m.weight.data.normal_(mean, std)
26
+
27
+
28
+ def apply_weight_norm(m):
29
+ classname = m.__class__.__name__
30
+ if classname.find("Conv") != -1:
31
+ weight_norm(m)
32
+
33
+
34
+ def get_padding(kernel_size, dilation=1):
35
+ return int((kernel_size*dilation - dilation)/2)
36
+
37
+
38
+ def load_checkpoint(filepath, device):
39
+ assert os.path.isfile(filepath)
40
+ print("Loading '{}'".format(filepath))
41
+ checkpoint_dict = torch.load(filepath, map_location=device)
42
+ print("Complete.")
43
+ return checkpoint_dict
44
+
45
+
46
+ def save_checkpoint(filepath, obj):
47
+ print("Saving checkpoint to {}".format(filepath))
48
+ torch.save(obj, filepath)
49
+ print("Complete.")
50
+
51
+
52
+ def del_old_checkpoints(cp_dir, prefix, n_models=2):
53
+ pattern = os.path.join(cp_dir, prefix + '????????')
54
+ cp_list = glob.glob(pattern) # get checkpoint paths
55
+ cp_list = sorted(cp_list)# sort by iter
56
+ if len(cp_list) > n_models: # if more than n_models models are found
57
+ for cp in cp_list[:-n_models]:  # delete the oldest models other than the latest n_models
58
+ open(cp, 'w').close()# empty file contents
59
+ os.unlink(cp)# delete file (move to trash when using Colab)
60
+
61
+
62
+ def scan_checkpoint(cp_dir, prefix):
63
+ pattern = os.path.join(cp_dir, prefix + '????????')
64
+ cp_list = glob.glob(pattern)
65
+ if len(cp_list) == 0:
66
+ return None
67
+ return sorted(cp_list)[-1]
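A short sketch of the checkpoint helpers above; the directory and the 'g_' prefix are placeholders, not names fixed by this repository:

```python
# Hedged example -- directory and prefix are hypothetical.
import torch
from modules.nsf_hifigan.utils import scan_checkpoint, load_checkpoint

latest = scan_checkpoint('checkpoints/nsf_hifigan', 'g_')   # newest file matching g_????????
if latest is not None:
    state_dict = load_checkpoint(latest, torch.device('cpu'))
```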
modules/parallel_wavegan/__init__.py ADDED
File without changes
modules/parallel_wavegan/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (145 Bytes).
 
modules/parallel_wavegan/layers/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from .causal_conv import * # NOQA
2
+ from .pqmf import * # NOQA
3
+ from .residual_block import * # NOQA
4
+ from .residual_stack import * # NOQA
5
+ from .upsample import * # NOQA
modules/parallel_wavegan/layers/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (305 Bytes).
 
modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-38.pyc ADDED
Binary file (2.18 kB).
 
modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-38.pyc ADDED
Binary file (3.89 kB).
 
modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-38.pyc ADDED
Binary file (4.12 kB).
 
modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-38.pyc ADDED
Binary file (2.42 kB).
 
modules/parallel_wavegan/layers/__pycache__/upsample.cpython-38.pyc ADDED
Binary file (5.92 kB).
 
modules/parallel_wavegan/layers/causal_conv.py ADDED
@@ -0,0 +1,56 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2020 Tomoki Hayashi
4
+ # MIT License (https://opensource.org/licenses/MIT)
5
+
6
+ """Causal convolution layer modules."""
7
+
8
+
9
+ import torch
10
+
11
+
12
+ class CausalConv1d(torch.nn.Module):
13
+ """CausalConv1d module with customized initialization."""
14
+
15
+ def __init__(self, in_channels, out_channels, kernel_size,
16
+ dilation=1, bias=True, pad="ConstantPad1d", pad_params={"value": 0.0}):
17
+ """Initialize CausalConv1d module."""
18
+ super(CausalConv1d, self).__init__()
19
+ self.pad = getattr(torch.nn, pad)((kernel_size - 1) * dilation, **pad_params)
20
+ self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size,
21
+ dilation=dilation, bias=bias)
22
+
23
+ def forward(self, x):
24
+ """Calculate forward propagation.
25
+
26
+ Args:
27
+ x (Tensor): Input tensor (B, in_channels, T).
28
+
29
+ Returns:
30
+ Tensor: Output tensor (B, out_channels, T).
31
+
32
+ """
33
+ return self.conv(self.pad(x))[:, :, :x.size(2)]
34
+
35
+
36
+ class CausalConvTranspose1d(torch.nn.Module):
37
+ """CausalConvTranspose1d module with customized initialization."""
38
+
39
+ def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True):
40
+ """Initialize CausalConvTranspose1d module."""
41
+ super(CausalConvTranspose1d, self).__init__()
42
+ self.deconv = torch.nn.ConvTranspose1d(
43
+ in_channels, out_channels, kernel_size, stride, bias=bias)
44
+ self.stride = stride
45
+
46
+ def forward(self, x):
47
+ """Calculate forward propagation.
48
+
49
+ Args:
50
+ x (Tensor): Input tensor (B, in_channels, T_in).
51
+
52
+ Returns:
53
+ Tensor: Output tensor (B, out_channels, T_out).
54
+
55
+ """
56
+ return self.deconv(x)[:, :, :-self.stride]
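As a quick illustration, CausalConv1d keeps the time dimension unchanged: it pads by (kernel_size - 1) * dilation and then trims the output back to the input length, so no future samples leak into the receptive field:

```python
# Shape check only; channel counts and dilation are arbitrary example values.
import torch
from modules.parallel_wavegan.layers.causal_conv import CausalConv1d

conv = CausalConv1d(in_channels=64, out_channels=64, kernel_size=3, dilation=2)
x = torch.randn(2, 64, 100)
print(conv(x).shape)   # torch.Size([2, 64, 100]) -- same length as the input
```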
modules/parallel_wavegan/layers/pqmf.py ADDED
@@ -0,0 +1,129 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2020 Tomoki Hayashi
4
+ # MIT License (https://opensource.org/licenses/MIT)
5
+
6
+ """Pseudo QMF modules."""
7
+
8
+ import numpy as np
9
+ import torch
10
+ import torch.nn.functional as F
11
+
12
+ from scipy.signal import kaiser
13
+
14
+
15
+ def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
16
+ """Design prototype filter for PQMF.
17
+
18
+ This method is based on `A Kaiser window approach for the design of prototype
19
+ filters of cosine modulated filterbanks`_.
20
+
21
+ Args:
22
+ taps (int): The number of filter taps.
23
+ cutoff_ratio (float): Cut-off frequency ratio.
24
+ beta (float): Beta coefficient for kaiser window.
25
+
26
+ Returns:
27
+ ndarray: Impulse response of prototype filter (taps + 1,).
28
+
29
+ .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
30
+ https://ieeexplore.ieee.org/abstract/document/681427
31
+
32
+ """
33
+ # check the arguments are valid
34
+ assert taps % 2 == 0, "The number of taps must be an even number."
35
+ assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
36
+
37
+ # make initial filter
38
+ omega_c = np.pi * cutoff_ratio
39
+ with np.errstate(invalid='ignore'):
40
+ h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \
41
+ / (np.pi * (np.arange(taps + 1) - 0.5 * taps))
42
+ h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
43
+
44
+ # apply kaiser window
45
+ w = kaiser(taps + 1, beta)
46
+ h = h_i * w
47
+
48
+ return h
49
+
50
+
51
+ class PQMF(torch.nn.Module):
52
+ """PQMF module.
53
+
54
+ This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
55
+
56
+ .. _`Near-perfect-reconstruction pseudo-QMF banks`:
57
+ https://ieeexplore.ieee.org/document/258122
58
+
59
+ """
60
+
61
+ def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0):
62
+ """Initialize PQMF module.
63
+
64
+ Args:
65
+ subbands (int): The number of subbands.
66
+ taps (int): The number of filter taps.
67
+ cutoff_ratio (float): Cut-off frequency ratio.
68
+ beta (float): Beta coefficient for kaiser window.
69
+
70
+ """
71
+ super(PQMF, self).__init__()
72
+
73
+ # define filter coefficient
74
+ h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
75
+ h_analysis = np.zeros((subbands, len(h_proto)))
76
+ h_synthesis = np.zeros((subbands, len(h_proto)))
77
+ for k in range(subbands):
78
+ h_analysis[k] = 2 * h_proto * np.cos(
79
+ (2 * k + 1) * (np.pi / (2 * subbands)) *
80
+ (np.arange(taps + 1) - ((taps - 1) / 2)) +
81
+ (-1) ** k * np.pi / 4)
82
+ h_synthesis[k] = 2 * h_proto * np.cos(
83
+ (2 * k + 1) * (np.pi / (2 * subbands)) *
84
+ (np.arange(taps + 1) - ((taps - 1) / 2)) -
85
+ (-1) ** k * np.pi / 4)
86
+
87
+ # convert to tensor
88
+ analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1)
89
+ synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0)
90
+
91
+ # register coefficients as buffer
92
+ self.register_buffer("analysis_filter", analysis_filter)
93
+ self.register_buffer("synthesis_filter", synthesis_filter)
94
+
95
+ # filter for downsampling & upsampling
96
+ updown_filter = torch.zeros((subbands, subbands, subbands)).float()
97
+ for k in range(subbands):
98
+ updown_filter[k, k, 0] = 1.0
99
+ self.register_buffer("updown_filter", updown_filter)
100
+ self.subbands = subbands
101
+
102
+ # keep padding info
103
+ self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
104
+
105
+ def analysis(self, x):
106
+ """Analysis with PQMF.
107
+
108
+ Args:
109
+ x (Tensor): Input tensor (B, 1, T).
110
+
111
+ Returns:
112
+ Tensor: Output tensor (B, subbands, T // subbands).
113
+
114
+ """
115
+ x = F.conv1d(self.pad_fn(x), self.analysis_filter)
116
+ return F.conv1d(x, self.updown_filter, stride=self.subbands)
117
+
118
+ def synthesis(self, x):
119
+ """Synthesis with PQMF.
120
+
121
+ Args:
122
+ x (Tensor): Input tensor (B, subbands, T // subbands).
123
+
124
+ Returns:
125
+ Tensor: Output tensor (B, 1, T).
126
+
127
+ """
128
+ x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands)
129
+ return F.conv1d(self.pad_fn(x), self.synthesis_filter)
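An illustrative round trip with the PQMF bank above, splitting a waveform into four subbands and resynthesising it (near-perfect reconstruction up to the prototype filter's delay):

```python
# Example tensor sizes only; subbands/taps follow the PQMF defaults above.
import torch
from modules.parallel_wavegan.layers.pqmf import PQMF

pqmf = PQMF(subbands=4)
x = torch.randn(1, 1, 16000)        # (B, 1, T)
bands = pqmf.analysis(x)            # (B, 4, T // 4)
x_hat = pqmf.synthesis(bands)       # (B, 1, T)
```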
modules/parallel_wavegan/layers/residual_block.py ADDED
@@ -0,0 +1,129 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ """Residual block module in WaveNet.
4
+
5
+ This code is modified from https://github.com/r9y9/wavenet_vocoder.
6
+
7
+ """
8
+
9
+ import math
10
+
11
+ import torch
12
+ import torch.nn.functional as F
13
+
14
+
15
+ class Conv1d(torch.nn.Conv1d):
16
+ """Conv1d module with customized initialization."""
17
+
18
+ def __init__(self, *args, **kwargs):
19
+ """Initialize Conv1d module."""
20
+ super(Conv1d, self).__init__(*args, **kwargs)
21
+
22
+ def reset_parameters(self):
23
+ """Reset parameters."""
24
+ torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
25
+ if self.bias is not None:
26
+ torch.nn.init.constant_(self.bias, 0.0)
27
+
28
+
29
+ class Conv1d1x1(Conv1d):
30
+ """1x1 Conv1d with customized initialization."""
31
+
32
+ def __init__(self, in_channels, out_channels, bias):
33
+ """Initialize 1x1 Conv1d module."""
34
+ super(Conv1d1x1, self).__init__(in_channels, out_channels,
35
+ kernel_size=1, padding=0,
36
+ dilation=1, bias=bias)
37
+
38
+
39
+ class ResidualBlock(torch.nn.Module):
40
+ """Residual block module in WaveNet."""
41
+
42
+ def __init__(self,
43
+ kernel_size=3,
44
+ residual_channels=64,
45
+ gate_channels=128,
46
+ skip_channels=64,
47
+ aux_channels=80,
48
+ dropout=0.0,
49
+ dilation=1,
50
+ bias=True,
51
+ use_causal_conv=False
52
+ ):
53
+ """Initialize ResidualBlock module.
54
+
55
+ Args:
56
+ kernel_size (int): Kernel size of dilation convolution layer.
57
+ residual_channels (int): Number of channels for residual connection.
58
+ skip_channels (int): Number of channels for skip connection.
59
+ aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
60
+ dropout (float): Dropout probability.
61
+ dilation (int): Dilation factor.
62
+ bias (bool): Whether to add bias parameter in convolution layers.
63
+ use_causal_conv (bool): Whether to use causal or non-causal convolution.
64
+
65
+ """
66
+ super(ResidualBlock, self).__init__()
67
+ self.dropout = dropout
68
+ # no future time stamps available
69
+ if use_causal_conv:
70
+ padding = (kernel_size - 1) * dilation
71
+ else:
72
+ assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
73
+ padding = (kernel_size - 1) // 2 * dilation
74
+ self.use_causal_conv = use_causal_conv
75
+
76
+ # dilation conv
77
+ self.conv = Conv1d(residual_channels, gate_channels, kernel_size,
78
+ padding=padding, dilation=dilation, bias=bias)
79
+
80
+ # local conditioning
81
+ if aux_channels > 0:
82
+ self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
83
+ else:
84
+ self.conv1x1_aux = None
85
+
86
+ # conv output is split into two groups
87
+ gate_out_channels = gate_channels // 2
88
+ self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias)
89
+ self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias)
90
+
91
+ def forward(self, x, c):
92
+ """Calculate forward propagation.
93
+
94
+ Args:
95
+ x (Tensor): Input tensor (B, residual_channels, T).
96
+ c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).
97
+
98
+ Returns:
99
+ Tensor: Output tensor for residual connection (B, residual_channels, T).
100
+ Tensor: Output tensor for skip connection (B, skip_channels, T).
101
+
102
+ """
103
+ residual = x
104
+ x = F.dropout(x, p=self.dropout, training=self.training)
105
+ x = self.conv(x)
106
+
107
+ # remove future time steps when using causal convolution
108
+ x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x
109
+
110
+ # split into two part for gated activation
111
+ splitdim = 1
112
+ xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim)
113
+
114
+ # local conditioning
115
+ if c is not None:
116
+ assert self.conv1x1_aux is not None
117
+ c = self.conv1x1_aux(c)
118
+ ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim)
119
+ xa, xb = xa + ca, xb + cb
120
+
121
+ x = torch.tanh(xa) * torch.sigmoid(xb)
122
+
123
+ # for skip connection
124
+ s = self.conv1x1_skip(x)
125
+
126
+ # for residual connection
127
+ x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5)
128
+
129
+ return x, s
modules/parallel_wavegan/layers/residual_stack.py ADDED
@@ -0,0 +1,75 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2020 Tomoki Hayashi
4
+ # MIT License (https://opensource.org/licenses/MIT)
5
+
6
+ """Residual stack module in MelGAN."""
7
+
8
+ import torch
9
+
10
+ from . import CausalConv1d
11
+
12
+
13
+ class ResidualStack(torch.nn.Module):
14
+ """Residual stack module introduced in MelGAN."""
15
+
16
+ def __init__(self,
17
+ kernel_size=3,
18
+ channels=32,
19
+ dilation=1,
20
+ bias=True,
21
+ nonlinear_activation="LeakyReLU",
22
+ nonlinear_activation_params={"negative_slope": 0.2},
23
+ pad="ReflectionPad1d",
24
+ pad_params={},
25
+ use_causal_conv=False,
26
+ ):
27
+ """Initialize ResidualStack module.
28
+
29
+ Args:
30
+ kernel_size (int): Kernel size of dilation convolution layer.
31
+ channels (int): Number of channels of convolution layers.
32
+ dilation (int): Dilation factor.
33
+ bias (bool): Whether to add bias parameter in convolution layers.
34
+ nonlinear_activation (str): Activation function module name.
35
+ nonlinear_activation_params (dict): Hyperparameters for activation function.
36
+ pad (str): Padding function module name before dilated convolution layer.
37
+ pad_params (dict): Hyperparameters for padding function.
38
+ use_causal_conv (bool): Whether to use causal convolution.
39
+
40
+ """
41
+ super(ResidualStack, self).__init__()
42
+
43
+ # define residual stack part
44
+ if not use_causal_conv:
45
+ assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
46
+ self.stack = torch.nn.Sequential(
47
+ getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
48
+ getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params),
49
+ torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias),
50
+ getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
51
+ torch.nn.Conv1d(channels, channels, 1, bias=bias),
52
+ )
53
+ else:
54
+ self.stack = torch.nn.Sequential(
55
+ getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
56
+ CausalConv1d(channels, channels, kernel_size, dilation=dilation,
57
+ bias=bias, pad=pad, pad_params=pad_params),
58
+ getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
59
+ torch.nn.Conv1d(channels, channels, 1, bias=bias),
60
+ )
61
+
62
+ # define extra layer for skip connection
63
+ self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias)
64
+
65
+ def forward(self, c):
66
+ """Calculate forward propagation.
67
+
68
+ Args:
69
+ c (Tensor): Input tensor (B, channels, T).
70
+
71
+ Returns:
72
+ Tensor: Output tensor (B, channels, T).
73
+
74
+ """
75
+ return self.stack(c) + self.skip_layer(c)