Artrajz commited on
Commit
dc13618
1 Parent(s): 52289ee
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +5 -0
  2. Dockerfile +2 -3
  3. Dockerfile_GPU +2 -3
  4. LICENSE +661 -21
  5. README_zh.md +41 -51
  6. app.py +89 -3
  7. bert_vits2/LICENSE +674 -0
  8. bert_vits2/README.md +5 -0
  9. bert_vits2/__init__.py +2 -0
  10. bert_vits2/attentions.py +352 -0
  11. bert_vits2/bert/chinese-roberta-wwm-ext-large/.gitattributes +9 -0
  12. bert_vits2/bert/chinese-roberta-wwm-ext-large/README.md +57 -0
  13. bert_vits2/bert/chinese-roberta-wwm-ext-large/added_tokens.json +1 -0
  14. bert_vits2/bert/chinese-roberta-wwm-ext-large/config.json +28 -0
  15. chinese_dialect_lexicons/zaonhe_2.ocd2 → bert_vits2/bert/chinese-roberta-wwm-ext-large/flax_model.msgpack +2 -2
  16. bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin +3 -0
  17. bert_vits2/bert/chinese-roberta-wwm-ext-large/special_tokens_map.json +1 -0
  18. bert_vits2/bert/chinese-roberta-wwm-ext-large/tf_model.h5 +3 -0
  19. bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer.json +0 -0
  20. bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer_config.json +1 -0
  21. bert_vits2/bert/chinese-roberta-wwm-ext-large/vocab.txt +0 -0
  22. bert_vits2/bert_vits2.py +86 -0
  23. bert_vits2/commons.py +161 -0
  24. bert_vits2/models.py +677 -0
  25. bert_vits2/modules.py +459 -0
  26. bert_vits2/requirements.txt +15 -0
  27. bert_vits2/text/__init__.py +29 -0
  28. bert_vits2/text/chinese.py +194 -0
  29. bert_vits2/text/chinese_bert.py +60 -0
  30. bert_vits2/text/cleaner.py +29 -0
  31. bert_vits2/text/cmudict.rep +0 -0
  32. chinese_dialect_lexicons/jyutjyu_2.ocd2 → bert_vits2/text/cmudict_cache.pickle +2 -2
  33. bert_vits2/text/english.py +146 -0
  34. bert_vits2/text/english_bert_mock.py +5 -0
  35. bert_vits2/text/japanese.py +104 -0
  36. bert_vits2/text/opencpop-strict.txt +429 -0
  37. bert_vits2/text/symbols.py +52 -0
  38. bert_vits2/text/tone_sandhi.py +351 -0
  39. bert_vits2/transforms.py +192 -0
  40. bert_vits2/utils.py +292 -0
  41. chinese_dialect_lexicons/changzhou.json +0 -23
  42. chinese_dialect_lexicons/changzhou.ocd2 +0 -0
  43. chinese_dialect_lexicons/changzhou_3.json +0 -23
  44. chinese_dialect_lexicons/changzhou_3.ocd2 +0 -0
  45. chinese_dialect_lexicons/cixi_2.json +0 -23
  46. chinese_dialect_lexicons/cixi_2.ocd2 +0 -0
  47. chinese_dialect_lexicons/fuyang_2.json +0 -23
  48. chinese_dialect_lexicons/fuyang_2.ocd2 +0 -0
  49. chinese_dialect_lexicons/hangzhou_2.json +0 -19
  50. chinese_dialect_lexicons/hangzhou_2.ocd2 +0 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ **/__pycache__
2
+ /Model/
3
+ /bert_vits2/monotonic_align/monotonic_align.egg-info/
4
+ /bert_vits2/monotonic_align/dist/
5
+ /bert_vits2/monotonic_align/build/
Dockerfile CHANGED
@@ -5,6 +5,8 @@ WORKDIR /app
5
 
6
  ENV DEBIAN_FRONTEND=noninteractive
7
 
 
 
8
  RUN apt-get update && \
9
  apt-get install -yq build-essential espeak-ng cmake wget && \
10
  apt-get clean && \
@@ -25,13 +27,10 @@ RUN wget https://raw.githubusercontent.com/Artrajz/archived/main/openjtalk/openj
25
 
26
  RUN pip install torch --index-url https://download.pytorch.org/whl/cpu --no-cache-dir
27
 
28
- COPY requirements.txt /app
29
  RUN pip install -r requirements.txt --no-cache-dir
30
 
31
  RUN pip install gunicorn --no-cache-dir
32
 
33
- COPY . /app
34
-
35
  EXPOSE 23456
36
 
37
  CMD ["gunicorn", "-c", "gunicorn_config.py", "app:app"]
 
5
 
6
  ENV DEBIAN_FRONTEND=noninteractive
7
 
8
+ COPY . /app
9
+
10
  RUN apt-get update && \
11
  apt-get install -yq build-essential espeak-ng cmake wget && \
12
  apt-get clean && \
 
27
 
28
  RUN pip install torch --index-url https://download.pytorch.org/whl/cpu --no-cache-dir
29
 
 
30
  RUN pip install -r requirements.txt --no-cache-dir
31
 
32
  RUN pip install gunicorn --no-cache-dir
33
 
 
 
34
  EXPOSE 23456
35
 
36
  CMD ["gunicorn", "-c", "gunicorn_config.py", "app:app"]
Dockerfile_GPU CHANGED
@@ -5,6 +5,8 @@ WORKDIR /app
5
 
6
  ENV DEBIAN_FRONTEND=noninteractive
7
 
 
 
8
  RUN apt-get update && \
9
  apt-get install -yq build-essential espeak-ng cmake wget && \
10
  apt-get clean && \
@@ -25,13 +27,10 @@ RUN wget https://raw.githubusercontent.com/Artrajz/archived/main/openjtalk/openj
25
 
26
  RUN pip install torch --index-url https://download.pytorch.org/whl/cu117 --no-cache-dir
27
 
28
- COPY requirements.txt /app
29
  RUN pip install -r requirements.txt --no-cache-dir
30
 
31
  RUN pip install gunicorn --no-cache-dir
32
 
33
- COPY . /app
34
-
35
  EXPOSE 23456
36
 
37
  CMD ["gunicorn", "-c", "gunicorn_config.py", "app:app"]
 
5
 
6
  ENV DEBIAN_FRONTEND=noninteractive
7
 
8
+ COPY . /app
9
+
10
  RUN apt-get update && \
11
  apt-get install -yq build-essential espeak-ng cmake wget && \
12
  apt-get clean && \
 
27
 
28
  RUN pip install torch --index-url https://download.pytorch.org/whl/cu117 --no-cache-dir
29
 
 
30
  RUN pip install -r requirements.txt --no-cache-dir
31
 
32
  RUN pip install gunicorn --no-cache-dir
33
 
 
 
34
  EXPOSE 23456
35
 
36
  CMD ["gunicorn", "-c", "gunicorn_config.py", "app:app"]
LICENSE CHANGED
@@ -1,21 +1,661 @@
1
- MIT License
2
-
3
- Copyright (c) 2023 Artrajz
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ GNU AFFERO GENERAL PUBLIC LICENSE
2
+ Version 3, 19 November 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU Affero General Public License is a free, copyleft license for
11
+ software and other kinds of works, specifically designed to ensure
12
+ cooperation with the community in the case of network server software.
13
+
14
+ The licenses for most software and other practical works are designed
15
+ to take away your freedom to share and change the works. By contrast,
16
+ our General Public Licenses are intended to guarantee your freedom to
17
+ share and change all versions of a program--to make sure it remains free
18
+ software for all its users.
19
+
20
+ When we speak of free software, we are referring to freedom, not
21
+ price. Our General Public Licenses are designed to make sure that you
22
+ have the freedom to distribute copies of free software (and charge for
23
+ them if you wish), that you receive source code or can get it if you
24
+ want it, that you can change the software or use pieces of it in new
25
+ free programs, and that you know you can do these things.
26
+
27
+ Developers that use our General Public Licenses protect your rights
28
+ with two steps: (1) assert copyright on the software, and (2) offer
29
+ you this License which gives you legal permission to copy, distribute
30
+ and/or modify the software.
31
+
32
+ A secondary benefit of defending all users' freedom is that
33
+ improvements made in alternate versions of the program, if they
34
+ receive widespread use, become available for other developers to
35
+ incorporate. Many developers of free software are heartened and
36
+ encouraged by the resulting cooperation. However, in the case of
37
+ software used on network servers, this result may fail to come about.
38
+ The GNU General Public License permits making a modified version and
39
+ letting the public access it on a server without ever releasing its
40
+ source code to the public.
41
+
42
+ The GNU Affero General Public License is designed specifically to
43
+ ensure that, in such cases, the modified source code becomes available
44
+ to the community. It requires the operator of a network server to
45
+ provide the source code of the modified version running there to the
46
+ users of that server. Therefore, public use of a modified version, on
47
+ a publicly accessible server, gives the public access to the source
48
+ code of the modified version.
49
+
50
+ An older license, called the Affero General Public License and
51
+ published by Affero, was designed to accomplish similar goals. This is
52
+ a different license, not a version of the Affero GPL, but Affero has
53
+ released a new version of the Affero GPL which permits relicensing under
54
+ this license.
55
+
56
+ The precise terms and conditions for copying, distribution and
57
+ modification follow.
58
+
59
+ TERMS AND CONDITIONS
60
+
61
+ 0. Definitions.
62
+
63
+ "This License" refers to version 3 of the GNU Affero General Public License.
64
+
65
+ "Copyright" also means copyright-like laws that apply to other kinds of
66
+ works, such as semiconductor masks.
67
+
68
+ "The Program" refers to any copyrightable work licensed under this
69
+ License. Each licensee is addressed as "you". "Licensees" and
70
+ "recipients" may be individuals or organizations.
71
+
72
+ To "modify" a work means to copy from or adapt all or part of the work
73
+ in a fashion requiring copyright permission, other than the making of an
74
+ exact copy. The resulting work is called a "modified version" of the
75
+ earlier work or a work "based on" the earlier work.
76
+
77
+ A "covered work" means either the unmodified Program or a work based
78
+ on the Program.
79
+
80
+ To "propagate" a work means to do anything with it that, without
81
+ permission, would make you directly or secondarily liable for
82
+ infringement under applicable copyright law, except executing it on a
83
+ computer or modifying a private copy. Propagation includes copying,
84
+ distribution (with or without modification), making available to the
85
+ public, and in some countries other activities as well.
86
+
87
+ To "convey" a work means any kind of propagation that enables other
88
+ parties to make or receive copies. Mere interaction with a user through
89
+ a computer network, with no transfer of a copy, is not conveying.
90
+
91
+ An interactive user interface displays "Appropriate Legal Notices"
92
+ to the extent that it includes a convenient and prominently visible
93
+ feature that (1) displays an appropriate copyright notice, and (2)
94
+ tells the user that there is no warranty for the work (except to the
95
+ extent that warranties are provided), that licensees may convey the
96
+ work under this License, and how to view a copy of this License. If
97
+ the interface presents a list of user commands or options, such as a
98
+ menu, a prominent item in the list meets this criterion.
99
+
100
+ 1. Source Code.
101
+
102
+ The "source code" for a work means the preferred form of the work
103
+ for making modifications to it. "Object code" means any non-source
104
+ form of a work.
105
+
106
+ A "Standard Interface" means an interface that either is an official
107
+ standard defined by a recognized standards body, or, in the case of
108
+ interfaces specified for a particular programming language, one that
109
+ is widely used among developers working in that language.
110
+
111
+ The "System Libraries" of an executable work include anything, other
112
+ than the work as a whole, that (a) is included in the normal form of
113
+ packaging a Major Component, but which is not part of that Major
114
+ Component, and (b) serves only to enable use of the work with that
115
+ Major Component, or to implement a Standard Interface for which an
116
+ implementation is available to the public in source code form. A
117
+ "Major Component", in this context, means a major essential component
118
+ (kernel, window system, and so on) of the specific operating system
119
+ (if any) on which the executable work runs, or a compiler used to
120
+ produce the work, or an object code interpreter used to run it.
121
+
122
+ The "Corresponding Source" for a work in object code form means all
123
+ the source code needed to generate, install, and (for an executable
124
+ work) run the object code and to modify the work, including scripts to
125
+ control those activities. However, it does not include the work's
126
+ System Libraries, or general-purpose tools or generally available free
127
+ programs which are used unmodified in performing those activities but
128
+ which are not part of the work. For example, Corresponding Source
129
+ includes interface definition files associated with source files for
130
+ the work, and the source code for shared libraries and dynamically
131
+ linked subprograms that the work is specifically designed to require,
132
+ such as by intimate data communication or control flow between those
133
+ subprograms and other parts of the work.
134
+
135
+ The Corresponding Source need not include anything that users
136
+ can regenerate automatically from other parts of the Corresponding
137
+ Source.
138
+
139
+ The Corresponding Source for a work in source code form is that
140
+ same work.
141
+
142
+ 2. Basic Permissions.
143
+
144
+ All rights granted under this License are granted for the term of
145
+ copyright on the Program, and are irrevocable provided the stated
146
+ conditions are met. This License explicitly affirms your unlimited
147
+ permission to run the unmodified Program. The output from running a
148
+ covered work is covered by this License only if the output, given its
149
+ content, constitutes a covered work. This License acknowledges your
150
+ rights of fair use or other equivalent, as provided by copyright law.
151
+
152
+ You may make, run and propagate covered works that you do not
153
+ convey, without conditions so long as your license otherwise remains
154
+ in force. You may convey covered works to others for the sole purpose
155
+ of having them make modifications exclusively for you, or provide you
156
+ with facilities for running those works, provided that you comply with
157
+ the terms of this License in conveying all material for which you do
158
+ not control copyright. Those thus making or running the covered works
159
+ for you must do so exclusively on your behalf, under your direction
160
+ and control, on terms that prohibit them from making any copies of
161
+ your copyrighted material outside their relationship with you.
162
+
163
+ Conveying under any other circumstances is permitted solely under
164
+ the conditions stated below. Sublicensing is not allowed; section 10
165
+ makes it unnecessary.
166
+
167
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168
+
169
+ No covered work shall be deemed part of an effective technological
170
+ measure under any applicable law fulfilling obligations under article
171
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172
+ similar laws prohibiting or restricting circumvention of such
173
+ measures.
174
+
175
+ When you convey a covered work, you waive any legal power to forbid
176
+ circumvention of technological measures to the extent such circumvention
177
+ is effected by exercising rights under this License with respect to
178
+ the covered work, and you disclaim any intention to limit operation or
179
+ modification of the work as a means of enforcing, against the work's
180
+ users, your or third parties' legal rights to forbid circumvention of
181
+ technological measures.
182
+
183
+ 4. Conveying Verbatim Copies.
184
+
185
+ You may convey verbatim copies of the Program's source code as you
186
+ receive it, in any medium, provided that you conspicuously and
187
+ appropriately publish on each copy an appropriate copyright notice;
188
+ keep intact all notices stating that this License and any
189
+ non-permissive terms added in accord with section 7 apply to the code;
190
+ keep intact all notices of the absence of any warranty; and give all
191
+ recipients a copy of this License along with the Program.
192
+
193
+ You may charge any price or no price for each copy that you convey,
194
+ and you may offer support or warranty protection for a fee.
195
+
196
+ 5. Conveying Modified Source Versions.
197
+
198
+ You may convey a work based on the Program, or the modifications to
199
+ produce it from the Program, in the form of source code under the
200
+ terms of section 4, provided that you also meet all of these conditions:
201
+
202
+ a) The work must carry prominent notices stating that you modified
203
+ it, and giving a relevant date.
204
+
205
+ b) The work must carry prominent notices stating that it is
206
+ released under this License and any conditions added under section
207
+ 7. This requirement modifies the requirement in section 4 to
208
+ "keep intact all notices".
209
+
210
+ c) You must license the entire work, as a whole, under this
211
+ License to anyone who comes into possession of a copy. This
212
+ License will therefore apply, along with any applicable section 7
213
+ additional terms, to the whole of the work, and all its parts,
214
+ regardless of how they are packaged. This License gives no
215
+ permission to license the work in any other way, but it does not
216
+ invalidate such permission if you have separately received it.
217
+
218
+ d) If the work has interactive user interfaces, each must display
219
+ Appropriate Legal Notices; however, if the Program has interactive
220
+ interfaces that do not display Appropriate Legal Notices, your
221
+ work need not make them do so.
222
+
223
+ A compilation of a covered work with other separate and independent
224
+ works, which are not by their nature extensions of the covered work,
225
+ and which are not combined with it such as to form a larger program,
226
+ in or on a volume of a storage or distribution medium, is called an
227
+ "aggregate" if the compilation and its resulting copyright are not
228
+ used to limit the access or legal rights of the compilation's users
229
+ beyond what the individual works permit. Inclusion of a covered work
230
+ in an aggregate does not cause this License to apply to the other
231
+ parts of the aggregate.
232
+
233
+ 6. Conveying Non-Source Forms.
234
+
235
+ You may convey a covered work in object code form under the terms
236
+ of sections 4 and 5, provided that you also convey the
237
+ machine-readable Corresponding Source under the terms of this License,
238
+ in one of these ways:
239
+
240
+ a) Convey the object code in, or embodied in, a physical product
241
+ (including a physical distribution medium), accompanied by the
242
+ Corresponding Source fixed on a durable physical medium
243
+ customarily used for software interchange.
244
+
245
+ b) Convey the object code in, or embodied in, a physical product
246
+ (including a physical distribution medium), accompanied by a
247
+ written offer, valid for at least three years and valid for as
248
+ long as you offer spare parts or customer support for that product
249
+ model, to give anyone who possesses the object code either (1) a
250
+ copy of the Corresponding Source for all the software in the
251
+ product that is covered by this License, on a durable physical
252
+ medium customarily used for software interchange, for a price no
253
+ more than your reasonable cost of physically performing this
254
+ conveying of source, or (2) access to copy the
255
+ Corresponding Source from a network server at no charge.
256
+
257
+ c) Convey individual copies of the object code with a copy of the
258
+ written offer to provide the Corresponding Source. This
259
+ alternative is allowed only occasionally and noncommercially, and
260
+ only if you received the object code with such an offer, in accord
261
+ with subsection 6b.
262
+
263
+ d) Convey the object code by offering access from a designated
264
+ place (gratis or for a charge), and offer equivalent access to the
265
+ Corresponding Source in the same way through the same place at no
266
+ further charge. You need not require recipients to copy the
267
+ Corresponding Source along with the object code. If the place to
268
+ copy the object code is a network server, the Corresponding Source
269
+ may be on a different server (operated by you or a third party)
270
+ that supports equivalent copying facilities, provided you maintain
271
+ clear directions next to the object code saying where to find the
272
+ Corresponding Source. Regardless of what server hosts the
273
+ Corresponding Source, you remain obligated to ensure that it is
274
+ available for as long as needed to satisfy these requirements.
275
+
276
+ e) Convey the object code using peer-to-peer transmission, provided
277
+ you inform other peers where the object code and Corresponding
278
+ Source of the work are being offered to the general public at no
279
+ charge under subsection 6d.
280
+
281
+ A separable portion of the object code, whose source code is excluded
282
+ from the Corresponding Source as a System Library, need not be
283
+ included in conveying the object code work.
284
+
285
+ A "User Product" is either (1) a "consumer product", which means any
286
+ tangible personal property which is normally used for personal, family,
287
+ or household purposes, or (2) anything designed or sold for incorporation
288
+ into a dwelling. In determining whether a product is a consumer product,
289
+ doubtful cases shall be resolved in favor of coverage. For a particular
290
+ product received by a particular user, "normally used" refers to a
291
+ typical or common use of that class of product, regardless of the status
292
+ of the particular user or of the way in which the particular user
293
+ actually uses, or expects or is expected to use, the product. A product
294
+ is a consumer product regardless of whether the product has substantial
295
+ commercial, industrial or non-consumer uses, unless such uses represent
296
+ the only significant mode of use of the product.
297
+
298
+ "Installation Information" for a User Product means any methods,
299
+ procedures, authorization keys, or other information required to install
300
+ and execute modified versions of a covered work in that User Product from
301
+ a modified version of its Corresponding Source. The information must
302
+ suffice to ensure that the continued functioning of the modified object
303
+ code is in no case prevented or interfered with solely because
304
+ modification has been made.
305
+
306
+ If you convey an object code work under this section in, or with, or
307
+ specifically for use in, a User Product, and the conveying occurs as
308
+ part of a transaction in which the right of possession and use of the
309
+ User Product is transferred to the recipient in perpetuity or for a
310
+ fixed term (regardless of how the transaction is characterized), the
311
+ Corresponding Source conveyed under this section must be accompanied
312
+ by the Installation Information. But this requirement does not apply
313
+ if neither you nor any third party retains the ability to install
314
+ modified object code on the User Product (for example, the work has
315
+ been installed in ROM).
316
+
317
+ The requirement to provide Installation Information does not include a
318
+ requirement to continue to provide support service, warranty, or updates
319
+ for a work that has been modified or installed by the recipient, or for
320
+ the User Product in which it has been modified or installed. Access to a
321
+ network may be denied when the modification itself materially and
322
+ adversely affects the operation of the network or violates the rules and
323
+ protocols for communication across the network.
324
+
325
+ Corresponding Source conveyed, and Installation Information provided,
326
+ in accord with this section must be in a format that is publicly
327
+ documented (and with an implementation available to the public in
328
+ source code form), and must require no special password or key for
329
+ unpacking, reading or copying.
330
+
331
+ 7. Additional Terms.
332
+
333
+ "Additional permissions" are terms that supplement the terms of this
334
+ License by making exceptions from one or more of its conditions.
335
+ Additional permissions that are applicable to the entire Program shall
336
+ be treated as though they were included in this License, to the extent
337
+ that they are valid under applicable law. If additional permissions
338
+ apply only to part of the Program, that part may be used separately
339
+ under those permissions, but the entire Program remains governed by
340
+ this License without regard to the additional permissions.
341
+
342
+ When you convey a copy of a covered work, you may at your option
343
+ remove any additional permissions from that copy, or from any part of
344
+ it. (Additional permissions may be written to require their own
345
+ removal in certain cases when you modify the work.) You may place
346
+ additional permissions on material, added by you to a covered work,
347
+ for which you have or can give appropriate copyright permission.
348
+
349
+ Notwithstanding any other provision of this License, for material you
350
+ add to a covered work, you may (if authorized by the copyright holders of
351
+ that material) supplement the terms of this License with terms:
352
+
353
+ a) Disclaiming warranty or limiting liability differently from the
354
+ terms of sections 15 and 16 of this License; or
355
+
356
+ b) Requiring preservation of specified reasonable legal notices or
357
+ author attributions in that material or in the Appropriate Legal
358
+ Notices displayed by works containing it; or
359
+
360
+ c) Prohibiting misrepresentation of the origin of that material, or
361
+ requiring that modified versions of such material be marked in
362
+ reasonable ways as different from the original version; or
363
+
364
+ d) Limiting the use for publicity purposes of names of licensors or
365
+ authors of the material; or
366
+
367
+ e) Declining to grant rights under trademark law for use of some
368
+ trade names, trademarks, or service marks; or
369
+
370
+ f) Requiring indemnification of licensors and authors of that
371
+ material by anyone who conveys the material (or modified versions of
372
+ it) with contractual assumptions of liability to the recipient, for
373
+ any liability that these contractual assumptions directly impose on
374
+ those licensors and authors.
375
+
376
+ All other non-permissive additional terms are considered "further
377
+ restrictions" within the meaning of section 10. If the Program as you
378
+ received it, or any part of it, contains a notice stating that it is
379
+ governed by this License along with a term that is a further
380
+ restriction, you may remove that term. If a license document contains
381
+ a further restriction but permits relicensing or conveying under this
382
+ License, you may add to a covered work material governed by the terms
383
+ of that license document, provided that the further restriction does
384
+ not survive such relicensing or conveying.
385
+
386
+ If you add terms to a covered work in accord with this section, you
387
+ must place, in the relevant source files, a statement of the
388
+ additional terms that apply to those files, or a notice indicating
389
+ where to find the applicable terms.
390
+
391
+ Additional terms, permissive or non-permissive, may be stated in the
392
+ form of a separately written license, or stated as exceptions;
393
+ the above requirements apply either way.
394
+
395
+ 8. Termination.
396
+
397
+ You may not propagate or modify a covered work except as expressly
398
+ provided under this License. Any attempt otherwise to propagate or
399
+ modify it is void, and will automatically terminate your rights under
400
+ this License (including any patent licenses granted under the third
401
+ paragraph of section 11).
402
+
403
+ However, if you cease all violation of this License, then your
404
+ license from a particular copyright holder is reinstated (a)
405
+ provisionally, unless and until the copyright holder explicitly and
406
+ finally terminates your license, and (b) permanently, if the copyright
407
+ holder fails to notify you of the violation by some reasonable means
408
+ prior to 60 days after the cessation.
409
+
410
+ Moreover, your license from a particular copyright holder is
411
+ reinstated permanently if the copyright holder notifies you of the
412
+ violation by some reasonable means, this is the first time you have
413
+ received notice of violation of this License (for any work) from that
414
+ copyright holder, and you cure the violation prior to 30 days after
415
+ your receipt of the notice.
416
+
417
+ Termination of your rights under this section does not terminate the
418
+ licenses of parties who have received copies or rights from you under
419
+ this License. If your rights have been terminated and not permanently
420
+ reinstated, you do not qualify to receive new licenses for the same
421
+ material under section 10.
422
+
423
+ 9. Acceptance Not Required for Having Copies.
424
+
425
+ You are not required to accept this License in order to receive or
426
+ run a copy of the Program. Ancillary propagation of a covered work
427
+ occurring solely as a consequence of using peer-to-peer transmission
428
+ to receive a copy likewise does not require acceptance. However,
429
+ nothing other than this License grants you permission to propagate or
430
+ modify any covered work. These actions infringe copyright if you do
431
+ not accept this License. Therefore, by modifying or propagating a
432
+ covered work, you indicate your acceptance of this License to do so.
433
+
434
+ 10. Automatic Licensing of Downstream Recipients.
435
+
436
+ Each time you convey a covered work, the recipient automatically
437
+ receives a license from the original licensors, to run, modify and
438
+ propagate that work, subject to this License. You are not responsible
439
+ for enforcing compliance by third parties with this License.
440
+
441
+ An "entity transaction" is a transaction transferring control of an
442
+ organization, or substantially all assets of one, or subdividing an
443
+ organization, or merging organizations. If propagation of a covered
444
+ work results from an entity transaction, each party to that
445
+ transaction who receives a copy of the work also receives whatever
446
+ licenses to the work the party's predecessor in interest had or could
447
+ give under the previous paragraph, plus a right to possession of the
448
+ Corresponding Source of the work from the predecessor in interest, if
449
+ the predecessor has it or can get it with reasonable efforts.
450
+
451
+ You may not impose any further restrictions on the exercise of the
452
+ rights granted or affirmed under this License. For example, you may
453
+ not impose a license fee, royalty, or other charge for exercise of
454
+ rights granted under this License, and you may not initiate litigation
455
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
456
+ any patent claim is infringed by making, using, selling, offering for
457
+ sale, or importing the Program or any portion of it.
458
+
459
+ 11. Patents.
460
+
461
+ A "contributor" is a copyright holder who authorizes use under this
462
+ License of the Program or a work on which the Program is based. The
463
+ work thus licensed is called the contributor's "contributor version".
464
+
465
+ A contributor's "essential patent claims" are all patent claims
466
+ owned or controlled by the contributor, whether already acquired or
467
+ hereafter acquired, that would be infringed by some manner, permitted
468
+ by this License, of making, using, or selling its contributor version,
469
+ but do not include claims that would be infringed only as a
470
+ consequence of further modification of the contributor version. For
471
+ purposes of this definition, "control" includes the right to grant
472
+ patent sublicenses in a manner consistent with the requirements of
473
+ this License.
474
+
475
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
476
+ patent license under the contributor's essential patent claims, to
477
+ make, use, sell, offer for sale, import and otherwise run, modify and
478
+ propagate the contents of its contributor version.
479
+
480
+ In the following three paragraphs, a "patent license" is any express
481
+ agreement or commitment, however denominated, not to enforce a patent
482
+ (such as an express permission to practice a patent or covenant not to
483
+ sue for patent infringement). To "grant" such a patent license to a
484
+ party means to make such an agreement or commitment not to enforce a
485
+ patent against the party.
486
+
487
+ If you convey a covered work, knowingly relying on a patent license,
488
+ and the Corresponding Source of the work is not available for anyone
489
+ to copy, free of charge and under the terms of this License, through a
490
+ publicly available network server or other readily accessible means,
491
+ then you must either (1) cause the Corresponding Source to be so
492
+ available, or (2) arrange to deprive yourself of the benefit of the
493
+ patent license for this particular work, or (3) arrange, in a manner
494
+ consistent with the requirements of this License, to extend the patent
495
+ license to downstream recipients. "Knowingly relying" means you have
496
+ actual knowledge that, but for the patent license, your conveying the
497
+ covered work in a country, or your recipient's use of the covered work
498
+ in a country, would infringe one or more identifiable patents in that
499
+ country that you have reason to believe are valid.
500
+
501
+ If, pursuant to or in connection with a single transaction or
502
+ arrangement, you convey, or propagate by procuring conveyance of, a
503
+ covered work, and grant a patent license to some of the parties
504
+ receiving the covered work authorizing them to use, propagate, modify
505
+ or convey a specific copy of the covered work, then the patent license
506
+ you grant is automatically extended to all recipients of the covered
507
+ work and works based on it.
508
+
509
+ A patent license is "discriminatory" if it does not include within
510
+ the scope of its coverage, prohibits the exercise of, or is
511
+ conditioned on the non-exercise of one or more of the rights that are
512
+ specifically granted under this License. You may not convey a covered
513
+ work if you are a party to an arrangement with a third party that is
514
+ in the business of distributing software, under which you make payment
515
+ to the third party based on the extent of your activity of conveying
516
+ the work, and under which the third party grants, to any of the
517
+ parties who would receive the covered work from you, a discriminatory
518
+ patent license (a) in connection with copies of the covered work
519
+ conveyed by you (or copies made from those copies), or (b) primarily
520
+ for and in connection with specific products or compilations that
521
+ contain the covered work, unless you entered into that arrangement,
522
+ or that patent license was granted, prior to 28 March 2007.
523
+
524
+ Nothing in this License shall be construed as excluding or limiting
525
+ any implied license or other defenses to infringement that may
526
+ otherwise be available to you under applicable patent law.
527
+
528
+ 12. No Surrender of Others' Freedom.
529
+
530
+ If conditions are imposed on you (whether by court order, agreement or
531
+ otherwise) that contradict the conditions of this License, they do not
532
+ excuse you from the conditions of this License. If you cannot convey a
533
+ covered work so as to satisfy simultaneously your obligations under this
534
+ License and any other pertinent obligations, then as a consequence you may
535
+ not convey it at all. For example, if you agree to terms that obligate you
536
+ to collect a royalty for further conveying from those to whom you convey
537
+ the Program, the only way you could satisfy both those terms and this
538
+ License would be to refrain entirely from conveying the Program.
539
+
540
+ 13. Remote Network Interaction; Use with the GNU General Public License.
541
+
542
+ Notwithstanding any other provision of this License, if you modify the
543
+ Program, your modified version must prominently offer all users
544
+ interacting with it remotely through a computer network (if your version
545
+ supports such interaction) an opportunity to receive the Corresponding
546
+ Source of your version by providing access to the Corresponding Source
547
+ from a network server at no charge, through some standard or customary
548
+ means of facilitating copying of software. This Corresponding Source
549
+ shall include the Corresponding Source for any work covered by version 3
550
+ of the GNU General Public License that is incorporated pursuant to the
551
+ following paragraph.
552
+
553
+ Notwithstanding any other provision of this License, you have
554
+ permission to link or combine any covered work with a work licensed
555
+ under version 3 of the GNU General Public License into a single
556
+ combined work, and to convey the resulting work. The terms of this
557
+ License will continue to apply to the part which is the covered work,
558
+ but the work with which it is combined will remain governed by version
559
+ 3 of the GNU General Public License.
560
+
561
+ 14. Revised Versions of this License.
562
+
563
+ The Free Software Foundation may publish revised and/or new versions of
564
+ the GNU Affero General Public License from time to time. Such new versions
565
+ will be similar in spirit to the present version, but may differ in detail to
566
+ address new problems or concerns.
567
+
568
+ Each version is given a distinguishing version number. If the
569
+ Program specifies that a certain numbered version of the GNU Affero General
570
+ Public License "or any later version" applies to it, you have the
571
+ option of following the terms and conditions either of that numbered
572
+ version or of any later version published by the Free Software
573
+ Foundation. If the Program does not specify a version number of the
574
+ GNU Affero General Public License, you may choose any version ever published
575
+ by the Free Software Foundation.
576
+
577
+ If the Program specifies that a proxy can decide which future
578
+ versions of the GNU Affero General Public License can be used, that proxy's
579
+ public statement of acceptance of a version permanently authorizes you
580
+ to choose that version for the Program.
581
+
582
+ Later license versions may give you additional or different
583
+ permissions. However, no additional obligations are imposed on any
584
+ author or copyright holder as a result of your choosing to follow a
585
+ later version.
586
+
587
+ 15. Disclaimer of Warranty.
588
+
589
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597
+
598
+ 16. Limitation of Liability.
599
+
600
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608
+ SUCH DAMAGES.
609
+
610
+ 17. Interpretation of Sections 15 and 16.
611
+
612
+ If the disclaimer of warranty and limitation of liability provided
613
+ above cannot be given local legal effect according to their terms,
614
+ reviewing courts shall apply local law that most closely approximates
615
+ an absolute waiver of all civil liability in connection with the
616
+ Program, unless a warranty or assumption of liability accompanies a
617
+ copy of the Program in return for a fee.
618
+
619
+ END OF TERMS AND CONDITIONS
620
+
621
+ How to Apply These Terms to Your New Programs
622
+
623
+ If you develop a new program, and you want it to be of the greatest
624
+ possible use to the public, the best way to achieve this is to make it
625
+ free software which everyone can redistribute and change under these terms.
626
+
627
+ To do so, attach the following notices to the program. It is safest
628
+ to attach them to the start of each source file to most effectively
629
+ state the exclusion of warranty; and each file should have at least
630
+ the "copyright" line and a pointer to where the full notice is found.
631
+
632
+ <one line to give the program's name and a brief idea of what it does.>
633
+ Copyright (C) <year> <name of author>
634
+
635
+ This program is free software: you can redistribute it and/or modify
636
+ it under the terms of the GNU Affero General Public License as published
637
+ by the Free Software Foundation, either version 3 of the License, or
638
+ (at your option) any later version.
639
+
640
+ This program is distributed in the hope that it will be useful,
641
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
642
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643
+ GNU Affero General Public License for more details.
644
+
645
+ You should have received a copy of the GNU Affero General Public License
646
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
647
+
648
+ Also add information on how to contact you by electronic and paper mail.
649
+
650
+ If your software can interact with users remotely through a computer
651
+ network, you should also make sure that it provides a way for users to
652
+ get its source. For example, if your program is a web application, its
653
+ interface could display a "Source" link that leads users to an archive
654
+ of the code. There are many ways you could offer source, and different
655
+ solutions will be better for different programs; see section 13 for the
656
+ specific requirements.
657
+
658
+ You should also get your employer (if you work as a programmer) or school,
659
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
660
+ For more information on this, and how to apply and follow the GNU AGPL, see
661
+ <https://www.gnu.org/licenses/>.
README_zh.md CHANGED
@@ -18,10 +18,11 @@
18
 
19
  # Feature
20
 
21
- - [x] VITS语音合成
22
- - [x] VITS语音转换
23
  - [x] HuBert-soft VITS模型
24
  - [x] W2V2 VITS / emotional-vits维度情感模型
 
 
25
  - [x] 加载多模型
26
  - [x] 自动识别语言并处理,根据模型的cleaner设置语言类型识别的范围,支持自定义语言类型范围
27
  - [x] 自定义默认参数
@@ -29,31 +30,6 @@
29
  - [x] GPU加速推理
30
  - [x] SSML语音合成标记语言(完善中...)
31
 
32
- <details><summary>Update Logs</summary><pre><code>
33
- <h2>2023.6.5</h2>
34
- <p>更换音频编码使用的库,增加flac格式,增加中文对读简单数学公式的支持</p>
35
- <h2>2023.5.24</h2>
36
- <p>添加dimensional_emotion api,从文件夹加载多个npy文件,Docker添加了Linux/ARM64和Linux/ARM64/v8平台</p>
37
- <h2>2023.5.15</h2>
38
- <p>增加english_cleaner,需要额外安装espeak才能使用</p>
39
- <h2>2023.5.12</h2>
40
- <p>增加ssml支持,但仍需完善。重构部分功能,hubert_vits中的speaker_id改为id</p>
41
- <h2>2023.5.2</h2>
42
- <p>增加w2v2-vits/emotional-vits模型支持,修改了speakers映射表并添加了对应模型支持的语言</p>
43
- <h2>2023.4.23</h2>
44
- <p>增加api key鉴权,默认禁用,需要在config.py中启用</p>
45
- <h2>2023.4.17</h2>
46
- <p>修改单语言的cleaner需要标注才会clean,增加GPU加速推理,但需要手动安装gpu推理环境</p>
47
- <h2>2023.4.12</h2>
48
- <p>项目由MoeGoe-Simple-API更名为vits-simple-api,支持长文本批处理,增加长文本分段阈值max</p>
49
- <h2>2023.4.7</h2>
50
- <p>增加配置文件可自定义默认参数,本次更新需要手动更新config.py,具体使用方法见config.py</p>
51
- <h2>2023.4.6</h2>
52
- <p>加入自动识别语种选项auto,lang参数默认修改为auto,自动识别仍有一定缺陷,请自行选择</p>
53
- <p>统一POST请求类型为multipart/form-data</p>
54
- </code></pre></details>
55
-
56
-
57
 
58
  ## demo
59
 
@@ -493,17 +469,17 @@ def voice_dimensional_emotion(upload_path):
493
 
494
  ## VITS语音合成
495
 
496
- | Name | Parameter | Is must | Default | Type | Instruction |
497
- | ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
498
- | 合成文本 | text | true | | str | 需要合成语音的文本。 |
499
- | 角色id | id | false | 0 | int | 即说话人id。 |
500
- | 音频格式 | format | false | wav | str | 支持wav,ogg,silk,mp3,flac |
501
- | 文本语言 | lang | false | auto | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
502
- | 语音长度/语速 | length | false | 1.0 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢。 |
503
- | 噪声 | noise | false | 0.33 | float | 样本噪声,控制合成的随机性。 |
504
- | sdp噪声 | noisew | false | 0.4 | float | 随机时长预测器噪声,控制音素发音长度。 |
505
- | 分段阈值 | max | false | 50 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
506
- | 流式响应 | streaming | false | false | bool | 流式合成语音,更快的首包响应。 |
507
 
508
  ## VITS 语音转换
509
 
@@ -524,25 +500,39 @@ def voice_dimensional_emotion(upload_path):
524
  | 噪声 | noise | true | | float | 样本噪声,控制合成的随机性。 |
525
  | sdp噪声 | noisew | true | | float | 随机时长预测器噪声,控制音素发音长度。 |
526
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
527
  ## Dimensional emotion
528
 
529
  | Name | Parameter | Is must | Default | Type | Instruction |
530
  | -------- | --------- | ------- | ------- | ---- | ----------------------------- |
531
  | 上传音频 | upload | true | | file | 返回存储维度情感向量的npy文件 |
532
 
533
- ## W2V2-VITS
534
-
535
- | Name | Parameter | Is must | Default | Type | Instruction |
536
- | ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
537
- | 合成文本 | text | true | | str | 需要合成语音的文本。 |
538
- | 角色id | id | false | 0 | int | 即说话人id。 |
539
- | 音频格式 | format | false | wav | str | 支持wav,ogg,silk,mp3,flac |
540
- | 文本语言 | lang | false | auto | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
541
- | 语音长度/语速 | length | false | 1.0 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
542
- | 噪声 | noise | false | 0.33 | float | 样本噪声,控制合成的随机性。 |
543
- | sdp噪声 | noisew | false | 0.4 | float | 随机时长预测器噪声,控制音素发音长度。 |
544
- | 分段阈值 | max | false | 50 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
545
- | 维度情感 | emotion | false | 0 | int | 范围取决于npy情感参考文件,如[innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)的all_emotions.npy模型范围是0-5457 |
546
 
547
  ## SSML语音合成标记语言
548
  目前支持的元素与属性
 
18
 
19
  # Feature
20
 
21
+ - [x] VITS语音合成,语音转换
 
22
  - [x] HuBert-soft VITS模型
23
  - [x] W2V2 VITS / emotional-vits维度情感模型
24
+ - [x] [vits_chinese](https://github.com/PlayVoice/vits_chinese)
25
+ - [x] [Bert-VITS2](https://github.com/Stardust-minus/Bert-VITS2)
26
  - [x] 加载多模型
27
  - [x] 自动识别语言并处理,根据模型的cleaner设置语言类型识别的范围,支持自定义语言类型范围
28
  - [x] 自定义默认参数
 
30
  - [x] GPU加速推理
31
  - [x] SSML语音合成标记语言(完善中...)
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  ## demo
35
 
 
469
 
470
  ## VITS语音合成
471
 
472
+ | Name | Parameter | Is must | Default | Type | Instruction |
473
+ | ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
474
+ | 合成文本 | text | true | | str | 需要合成语音的文本。 |
475
+ | 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
476
+ | 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
477
+ | 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
478
+ | 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢。 |
479
+ | 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
480
+ | sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
481
+ | 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
482
+ | 流式响应 | streaming | false | false | bool | 流式合成语音,更快的首包响应。 |
483
 
484
  ## VITS 语音转换
485
 
 
500
  | 噪声 | noise | true | | float | 样本噪声,控制合成的随机性。 |
501
  | sdp噪声 | noisew | true | | float | 随机时长预测器噪声,控制音素发音长度。 |
502
 
503
+ ## W2V2-VITS
504
+
505
+ | Name | Parameter | Is must | Default | Type | Instruction |
506
+ | ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
507
+ | 合成文本 | text | true | | str | 需要合成语音的文本。 |
508
+ | 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
509
+ | 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
510
+ | 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
511
+ | 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
512
+ | 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
513
+ | sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
514
+ | 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
515
+ | 维度情感 | emotion | false | 0 | int | 范围取决于npy情感参考文件,如[innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)的all_emotions.npy模型范围是0-5457 |
516
+
517
  ## Dimensional emotion
518
 
519
  | Name | Parameter | Is must | Default | Type | Instruction |
520
  | -------- | --------- | ------- | ------- | ---- | ----------------------------- |
521
  | 上传音频 | upload | true | | file | 返回存储维度情感向量的npy文件 |
522
 
523
+ ## Bert-VITS2语音合成
524
+
525
+ | Name | Parameter | Is must | Default | Type | Instruction |
526
+ | ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
527
+ | 合成文本 | text | true | | str | 需要合成语音的文本。 |
528
+ | 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
529
+ | 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
530
+ | 文本语言 | lang | false | 从`config.py`中获取 | str | 目前只有中文。 |
531
+ | 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢。 |
532
+ | 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
533
+ | sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
534
+ | 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
535
+ | SDP/DP混合比 | sdp_ratio | false | 从`config.py`中获取 | int | SDP在合成时的占比,理论上此比率越高,合成的语音语调方差越大。 |
536
 
537
  ## SSML语音合成标记语言
538
  目前支持的元素与属性
app.py CHANGED
@@ -48,9 +48,10 @@ def index():
48
  kwargs = {
49
  "speakers": tts.voice_speakers,
50
  "speakers_count": tts.speakers_count,
51
- "vits_speakers_count":tts._vits_speakers_count,
52
- "w2v2_speakers_count":tts._w2v2_speakers_count,
53
- "w2v2_emotion_count":tts._w2v2_emotion_count
 
54
  }
55
  return render_template("index.html", **kwargs)
56
 
@@ -370,6 +371,7 @@ def ssml():
370
 
371
 
372
  @app.route('/voice/dimension-emotion', methods=["POST"])
 
373
  def dimensional_emotion():
374
  if request.method == "POST":
375
  try:
@@ -394,6 +396,90 @@ def dimensional_emotion():
394
  return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
395
 
396
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  @app.route('/voice/check', methods=["GET", "POST"])
398
  def check():
399
  try:
 
48
  kwargs = {
49
  "speakers": tts.voice_speakers,
50
  "speakers_count": tts.speakers_count,
51
+ "vits_speakers_count": tts.vits_speakers_count,
52
+ "w2v2_speakers_count": tts.w2v2_speakers_count,
53
+ "w2v2_emotion_count": tts.w2v2_emotion_count,
54
+ "bert_vits2_speakers_count": tts.bert_vits2_speakers_count
55
  }
56
  return render_template("index.html", **kwargs)
57
 
 
371
 
372
 
373
  @app.route('/voice/dimension-emotion', methods=["POST"])
374
+ @require_api_key
375
  def dimensional_emotion():
376
  if request.method == "POST":
377
  try:
 
396
  return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
397
 
398
 
399
+ @app.route('/voice/bert-vits2', methods=["GET", "POST"])
400
+ @require_api_key
401
+ def voice_bert_vits2_api():
402
+ try:
403
+ if request.method == "GET":
404
+ text = request.args.get("text", "")
405
+ id = int(request.args.get("id", app.config.get("ID", 0)))
406
+ format = request.args.get("format", app.config.get("FORMAT", "wav"))
407
+ # lang = request.args.get("lang", app.config.get("LANG", "auto"))
408
+ lang = "ZH"
409
+ length = float(request.args.get("length", app.config.get("LENGTH", 1)))
410
+ noise = float(request.args.get("noise", app.config.get("NOISE", 0.5)))
411
+ noisew = float(request.args.get("noisew", app.config.get("NOISEW", 0.6)))
412
+ sdp_ratio = float(request.args.get("sdp_ratio", 0.2))
413
+ max = int(request.args.get("max", app.config.get("MAX", 50)))
414
+ elif request.method == "POST":
415
+ content_type = request.headers.get('Content-Type')
416
+ if content_type == 'application/json':
417
+ data = request.get_json()
418
+ else:
419
+ data = request.form
420
+ text = data.get("text", "")
421
+ id = int(data.get("id", app.config.get("ID", 0)))
422
+ format = data.get("format", app.config.get("FORMAT", "wav"))
423
+ # lang = data.get("lang", app.config.get("LANG", "auto"))
424
+ lang = "ZH"
425
+ length = float(data.get("length", app.config.get("LENGTH", 1)))
426
+ noise = float(data.get("noise", app.config.get("NOISE", 0.667)))
427
+ noisew = float(data.get("noisew", app.config.get("NOISEW", 0.8)))
428
+ sdp_ratio = float(data.get("sdp_ratio", app.config.get("SDP_RATIO", 0.2)))
429
+ max = int(data.get("max", app.config.get("MAX", 50)))
430
+ except Exception as e:
431
+ logger.error(f"[Bert-VITS2] {e}")
432
+ return make_response("parameter error", 400)
433
+
434
+ logger.info(f"[Bert-VITS2] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew} sdp_ratio:{sdp_ratio}")
435
+ logger.info(f"[Bert-VITS2] len:{len(text)} text:{text}")
436
+
437
+ if check_is_none(text):
438
+ logger.info(f"[Bert-VITS2] text is empty")
439
+ return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
440
+
441
+ if check_is_none(id):
442
+ logger.info(f"[Bert-VITS2] speaker id is empty")
443
+ return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
444
+
445
+ if id < 0 or id >= tts.bert_vits2_speakers_count:
446
+ logger.info(f"[Bert-VITS2] speaker id {id} does not exist")
447
+ return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
448
+
449
+ # 校验模型是否支持输入的语言
450
+ speaker_lang = tts.voice_speakers["BERT-VITS2"][id].get('lang')
451
+ if lang.upper() != "AUTO" and lang.upper() != "MIX" and len(speaker_lang) != 1 and lang not in speaker_lang:
452
+ logger.info(f"[Bert-VITS2] lang \"{lang}\" is not in {speaker_lang}")
453
+ return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
454
+
455
+ # 如果配置文件中设置了LANGUAGE_AUTOMATIC_DETECT则强制将speaker_lang设置为LANGUAGE_AUTOMATIC_DETECT
456
+ if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
457
+ speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
458
+
459
+ fname = f"{str(uuid.uuid1())}.{format}"
460
+ file_type = f"audio/{format}"
461
+ task = {"text": text,
462
+ "id": id,
463
+ "format": format,
464
+ "length": length,
465
+ "noise": noise,
466
+ "noisew": noisew,
467
+ "sdp_ratio": sdp_ratio,
468
+ "max": max,
469
+ "lang": lang,
470
+ "speaker_lang": speaker_lang}
471
+
472
+ if app.config.get("SAVE_AUDIO", False):
473
+ logger.debug(f"[Bert-VITS2] {fname}")
474
+
475
+
476
+ t1 = time.time()
477
+ audio = tts.bert_vits2_infer(task, fname)
478
+ t2 = time.time()
479
+ logger.info(f"[Bert-VITS2] finish in {(t2 - t1):.2f}s")
480
+ return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
481
+
482
+
483
  @app.route('/voice/check', methods=["GET", "POST"])
484
  def check():
485
  try:
bert_vits2/LICENSE ADDED
@@ -0,0 +1,674 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ GNU GENERAL PUBLIC LICENSE
2
+ Version 3, 29 June 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU General Public License is a free, copyleft license for
11
+ software and other kinds of works.
12
+
13
+ The licenses for most software and other practical works are designed
14
+ to take away your freedom to share and change the works. By contrast,
15
+ the GNU General Public License is intended to guarantee your freedom to
16
+ share and change all versions of a program--to make sure it remains free
17
+ software for all its users. We, the Free Software Foundation, use the
18
+ GNU General Public License for most of our software; it applies also to
19
+ any other work released this way by its authors. You can apply it to
20
+ your programs, too.
21
+
22
+ When we speak of free software, we are referring to freedom, not
23
+ price. Our General Public Licenses are designed to make sure that you
24
+ have the freedom to distribute copies of free software (and charge for
25
+ them if you wish), that you receive source code or can get it if you
26
+ want it, that you can change the software or use pieces of it in new
27
+ free programs, and that you know you can do these things.
28
+
29
+ To protect your rights, we need to prevent others from denying you
30
+ these rights or asking you to surrender the rights. Therefore, you have
31
+ certain responsibilities if you distribute copies of the software, or if
32
+ you modify it: responsibilities to respect the freedom of others.
33
+
34
+ For example, if you distribute copies of such a program, whether
35
+ gratis or for a fee, you must pass on to the recipients the same
36
+ freedoms that you received. You must make sure that they, too, receive
37
+ or can get the source code. And you must show them these terms so they
38
+ know their rights.
39
+
40
+ Developers that use the GNU GPL protect your rights with two steps:
41
+ (1) assert copyright on the software, and (2) offer you this License
42
+ giving you legal permission to copy, distribute and/or modify it.
43
+
44
+ For the developers' and authors' protection, the GPL clearly explains
45
+ that there is no warranty for this free software. For both users' and
46
+ authors' sake, the GPL requires that modified versions be marked as
47
+ changed, so that their problems will not be attributed erroneously to
48
+ authors of previous versions.
49
+
50
+ Some devices are designed to deny users access to install or run
51
+ modified versions of the software inside them, although the manufacturer
52
+ can do so. This is fundamentally incompatible with the aim of
53
+ protecting users' freedom to change the software. The systematic
54
+ pattern of such abuse occurs in the area of products for individuals to
55
+ use, which is precisely where it is most unacceptable. Therefore, we
56
+ have designed this version of the GPL to prohibit the practice for those
57
+ products. If such problems arise substantially in other domains, we
58
+ stand ready to extend this provision to those domains in future versions
59
+ of the GPL, as needed to protect the freedom of users.
60
+
61
+ Finally, every program is threatened constantly by software patents.
62
+ States should not allow patents to restrict development and use of
63
+ software on general-purpose computers, but in those that do, we wish to
64
+ avoid the special danger that patents applied to a free program could
65
+ make it effectively proprietary. To prevent this, the GPL assures that
66
+ patents cannot be used to render the program non-free.
67
+
68
+ The precise terms and conditions for copying, distribution and
69
+ modification follow.
70
+
71
+ TERMS AND CONDITIONS
72
+
73
+ 0. Definitions.
74
+
75
+ "This License" refers to version 3 of the GNU General Public License.
76
+
77
+ "Copyright" also means copyright-like laws that apply to other kinds of
78
+ works, such as semiconductor masks.
79
+
80
+ "The Program" refers to any copyrightable work licensed under this
81
+ License. Each licensee is addressed as "you". "Licensees" and
82
+ "recipients" may be individuals or organizations.
83
+
84
+ To "modify" a work means to copy from or adapt all or part of the work
85
+ in a fashion requiring copyright permission, other than the making of an
86
+ exact copy. The resulting work is called a "modified version" of the
87
+ earlier work or a work "based on" the earlier work.
88
+
89
+ A "covered work" means either the unmodified Program or a work based
90
+ on the Program.
91
+
92
+ To "propagate" a work means to do anything with it that, without
93
+ permission, would make you directly or secondarily liable for
94
+ infringement under applicable copyright law, except executing it on a
95
+ computer or modifying a private copy. Propagation includes copying,
96
+ distribution (with or without modification), making available to the
97
+ public, and in some countries other activities as well.
98
+
99
+ To "convey" a work means any kind of propagation that enables other
100
+ parties to make or receive copies. Mere interaction with a user through
101
+ a computer network, with no transfer of a copy, is not conveying.
102
+
103
+ An interactive user interface displays "Appropriate Legal Notices"
104
+ to the extent that it includes a convenient and prominently visible
105
+ feature that (1) displays an appropriate copyright notice, and (2)
106
+ tells the user that there is no warranty for the work (except to the
107
+ extent that warranties are provided), that licensees may convey the
108
+ work under this License, and how to view a copy of this License. If
109
+ the interface presents a list of user commands or options, such as a
110
+ menu, a prominent item in the list meets this criterion.
111
+
112
+ 1. Source Code.
113
+
114
+ The "source code" for a work means the preferred form of the work
115
+ for making modifications to it. "Object code" means any non-source
116
+ form of a work.
117
+
118
+ A "Standard Interface" means an interface that either is an official
119
+ standard defined by a recognized standards body, or, in the case of
120
+ interfaces specified for a particular programming language, one that
121
+ is widely used among developers working in that language.
122
+
123
+ The "System Libraries" of an executable work include anything, other
124
+ than the work as a whole, that (a) is included in the normal form of
125
+ packaging a Major Component, but which is not part of that Major
126
+ Component, and (b) serves only to enable use of the work with that
127
+ Major Component, or to implement a Standard Interface for which an
128
+ implementation is available to the public in source code form. A
129
+ "Major Component", in this context, means a major essential component
130
+ (kernel, window system, and so on) of the specific operating system
131
+ (if any) on which the executable work runs, or a compiler used to
132
+ produce the work, or an object code interpreter used to run it.
133
+
134
+ The "Corresponding Source" for a work in object code form means all
135
+ the source code needed to generate, install, and (for an executable
136
+ work) run the object code and to modify the work, including scripts to
137
+ control those activities. However, it does not include the work's
138
+ System Libraries, or general-purpose tools or generally available free
139
+ programs which are used unmodified in performing those activities but
140
+ which are not part of the work. For example, Corresponding Source
141
+ includes interface definition files associated with source files for
142
+ the work, and the source code for shared libraries and dynamically
143
+ linked subprograms that the work is specifically designed to require,
144
+ such as by intimate data communication or control flow between those
145
+ subprograms and other parts of the work.
146
+
147
+ The Corresponding Source need not include anything that users
148
+ can regenerate automatically from other parts of the Corresponding
149
+ Source.
150
+
151
+ The Corresponding Source for a work in source code form is that
152
+ same work.
153
+
154
+ 2. Basic Permissions.
155
+
156
+ All rights granted under this License are granted for the term of
157
+ copyright on the Program, and are irrevocable provided the stated
158
+ conditions are met. This License explicitly affirms your unlimited
159
+ permission to run the unmodified Program. The output from running a
160
+ covered work is covered by this License only if the output, given its
161
+ content, constitutes a covered work. This License acknowledges your
162
+ rights of fair use or other equivalent, as provided by copyright law.
163
+
164
+ You may make, run and propagate covered works that you do not
165
+ convey, without conditions so long as your license otherwise remains
166
+ in force. You may convey covered works to others for the sole purpose
167
+ of having them make modifications exclusively for you, or provide you
168
+ with facilities for running those works, provided that you comply with
169
+ the terms of this License in conveying all material for which you do
170
+ not control copyright. Those thus making or running the covered works
171
+ for you must do so exclusively on your behalf, under your direction
172
+ and control, on terms that prohibit them from making any copies of
173
+ your copyrighted material outside their relationship with you.
174
+
175
+ Conveying under any other circumstances is permitted solely under
176
+ the conditions stated below. Sublicensing is not allowed; section 10
177
+ makes it unnecessary.
178
+
179
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180
+
181
+ No covered work shall be deemed part of an effective technological
182
+ measure under any applicable law fulfilling obligations under article
183
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184
+ similar laws prohibiting or restricting circumvention of such
185
+ measures.
186
+
187
+ When you convey a covered work, you waive any legal power to forbid
188
+ circumvention of technological measures to the extent such circumvention
189
+ is effected by exercising rights under this License with respect to
190
+ the covered work, and you disclaim any intention to limit operation or
191
+ modification of the work as a means of enforcing, against the work's
192
+ users, your or third parties' legal rights to forbid circumvention of
193
+ technological measures.
194
+
195
+ 4. Conveying Verbatim Copies.
196
+
197
+ You may convey verbatim copies of the Program's source code as you
198
+ receive it, in any medium, provided that you conspicuously and
199
+ appropriately publish on each copy an appropriate copyright notice;
200
+ keep intact all notices stating that this License and any
201
+ non-permissive terms added in accord with section 7 apply to the code;
202
+ keep intact all notices of the absence of any warranty; and give all
203
+ recipients a copy of this License along with the Program.
204
+
205
+ You may charge any price or no price for each copy that you convey,
206
+ and you may offer support or warranty protection for a fee.
207
+
208
+ 5. Conveying Modified Source Versions.
209
+
210
+ You may convey a work based on the Program, or the modifications to
211
+ produce it from the Program, in the form of source code under the
212
+ terms of section 4, provided that you also meet all of these conditions:
213
+
214
+ a) The work must carry prominent notices stating that you modified
215
+ it, and giving a relevant date.
216
+
217
+ b) The work must carry prominent notices stating that it is
218
+ released under this License and any conditions added under section
219
+ 7. This requirement modifies the requirement in section 4 to
220
+ "keep intact all notices".
221
+
222
+ c) You must license the entire work, as a whole, under this
223
+ License to anyone who comes into possession of a copy. This
224
+ License will therefore apply, along with any applicable section 7
225
+ additional terms, to the whole of the work, and all its parts,
226
+ regardless of how they are packaged. This License gives no
227
+ permission to license the work in any other way, but it does not
228
+ invalidate such permission if you have separately received it.
229
+
230
+ d) If the work has interactive user interfaces, each must display
231
+ Appropriate Legal Notices; however, if the Program has interactive
232
+ interfaces that do not display Appropriate Legal Notices, your
233
+ work need not make them do so.
234
+
235
+ A compilation of a covered work with other separate and independent
236
+ works, which are not by their nature extensions of the covered work,
237
+ and which are not combined with it such as to form a larger program,
238
+ in or on a volume of a storage or distribution medium, is called an
239
+ "aggregate" if the compilation and its resulting copyright are not
240
+ used to limit the access or legal rights of the compilation's users
241
+ beyond what the individual works permit. Inclusion of a covered work
242
+ in an aggregate does not cause this License to apply to the other
243
+ parts of the aggregate.
244
+
245
+ 6. Conveying Non-Source Forms.
246
+
247
+ You may convey a covered work in object code form under the terms
248
+ of sections 4 and 5, provided that you also convey the
249
+ machine-readable Corresponding Source under the terms of this License,
250
+ in one of these ways:
251
+
252
+ a) Convey the object code in, or embodied in, a physical product
253
+ (including a physical distribution medium), accompanied by the
254
+ Corresponding Source fixed on a durable physical medium
255
+ customarily used for software interchange.
256
+
257
+ b) Convey the object code in, or embodied in, a physical product
258
+ (including a physical distribution medium), accompanied by a
259
+ written offer, valid for at least three years and valid for as
260
+ long as you offer spare parts or customer support for that product
261
+ model, to give anyone who possesses the object code either (1) a
262
+ copy of the Corresponding Source for all the software in the
263
+ product that is covered by this License, on a durable physical
264
+ medium customarily used for software interchange, for a price no
265
+ more than your reasonable cost of physically performing this
266
+ conveying of source, or (2) access to copy the
267
+ Corresponding Source from a network server at no charge.
268
+
269
+ c) Convey individual copies of the object code with a copy of the
270
+ written offer to provide the Corresponding Source. This
271
+ alternative is allowed only occasionally and noncommercially, and
272
+ only if you received the object code with such an offer, in accord
273
+ with subsection 6b.
274
+
275
+ d) Convey the object code by offering access from a designated
276
+ place (gratis or for a charge), and offer equivalent access to the
277
+ Corresponding Source in the same way through the same place at no
278
+ further charge. You need not require recipients to copy the
279
+ Corresponding Source along with the object code. If the place to
280
+ copy the object code is a network server, the Corresponding Source
281
+ may be on a different server (operated by you or a third party)
282
+ that supports equivalent copying facilities, provided you maintain
283
+ clear directions next to the object code saying where to find the
284
+ Corresponding Source. Regardless of what server hosts the
285
+ Corresponding Source, you remain obligated to ensure that it is
286
+ available for as long as needed to satisfy these requirements.
287
+
288
+ e) Convey the object code using peer-to-peer transmission, provided
289
+ you inform other peers where the object code and Corresponding
290
+ Source of the work are being offered to the general public at no
291
+ charge under subsection 6d.
292
+
293
+ A separable portion of the object code, whose source code is excluded
294
+ from the Corresponding Source as a System Library, need not be
295
+ included in conveying the object code work.
296
+
297
+ A "User Product" is either (1) a "consumer product", which means any
298
+ tangible personal property which is normally used for personal, family,
299
+ or household purposes, or (2) anything designed or sold for incorporation
300
+ into a dwelling. In determining whether a product is a consumer product,
301
+ doubtful cases shall be resolved in favor of coverage. For a particular
302
+ product received by a particular user, "normally used" refers to a
303
+ typical or common use of that class of product, regardless of the status
304
+ of the particular user or of the way in which the particular user
305
+ actually uses, or expects or is expected to use, the product. A product
306
+ is a consumer product regardless of whether the product has substantial
307
+ commercial, industrial or non-consumer uses, unless such uses represent
308
+ the only significant mode of use of the product.
309
+
310
+ "Installation Information" for a User Product means any methods,
311
+ procedures, authorization keys, or other information required to install
312
+ and execute modified versions of a covered work in that User Product from
313
+ a modified version of its Corresponding Source. The information must
314
+ suffice to ensure that the continued functioning of the modified object
315
+ code is in no case prevented or interfered with solely because
316
+ modification has been made.
317
+
318
+ If you convey an object code work under this section in, or with, or
319
+ specifically for use in, a User Product, and the conveying occurs as
320
+ part of a transaction in which the right of possession and use of the
321
+ User Product is transferred to the recipient in perpetuity or for a
322
+ fixed term (regardless of how the transaction is characterized), the
323
+ Corresponding Source conveyed under this section must be accompanied
324
+ by the Installation Information. But this requirement does not apply
325
+ if neither you nor any third party retains the ability to install
326
+ modified object code on the User Product (for example, the work has
327
+ been installed in ROM).
328
+
329
+ The requirement to provide Installation Information does not include a
330
+ requirement to continue to provide support service, warranty, or updates
331
+ for a work that has been modified or installed by the recipient, or for
332
+ the User Product in which it has been modified or installed. Access to a
333
+ network may be denied when the modification itself materially and
334
+ adversely affects the operation of the network or violates the rules and
335
+ protocols for communication across the network.
336
+
337
+ Corresponding Source conveyed, and Installation Information provided,
338
+ in accord with this section must be in a format that is publicly
339
+ documented (and with an implementation available to the public in
340
+ source code form), and must require no special password or key for
341
+ unpacking, reading or copying.
342
+
343
+ 7. Additional Terms.
344
+
345
+ "Additional permissions" are terms that supplement the terms of this
346
+ License by making exceptions from one or more of its conditions.
347
+ Additional permissions that are applicable to the entire Program shall
348
+ be treated as though they were included in this License, to the extent
349
+ that they are valid under applicable law. If additional permissions
350
+ apply only to part of the Program, that part may be used separately
351
+ under those permissions, but the entire Program remains governed by
352
+ this License without regard to the additional permissions.
353
+
354
+ When you convey a copy of a covered work, you may at your option
355
+ remove any additional permissions from that copy, or from any part of
356
+ it. (Additional permissions may be written to require their own
357
+ removal in certain cases when you modify the work.) You may place
358
+ additional permissions on material, added by you to a covered work,
359
+ for which you have or can give appropriate copyright permission.
360
+
361
+ Notwithstanding any other provision of this License, for material you
362
+ add to a covered work, you may (if authorized by the copyright holders of
363
+ that material) supplement the terms of this License with terms:
364
+
365
+ a) Disclaiming warranty or limiting liability differently from the
366
+ terms of sections 15 and 16 of this License; or
367
+
368
+ b) Requiring preservation of specified reasonable legal notices or
369
+ author attributions in that material or in the Appropriate Legal
370
+ Notices displayed by works containing it; or
371
+
372
+ c) Prohibiting misrepresentation of the origin of that material, or
373
+ requiring that modified versions of such material be marked in
374
+ reasonable ways as different from the original version; or
375
+
376
+ d) Limiting the use for publicity purposes of names of licensors or
377
+ authors of the material; or
378
+
379
+ e) Declining to grant rights under trademark law for use of some
380
+ trade names, trademarks, or service marks; or
381
+
382
+ f) Requiring indemnification of licensors and authors of that
383
+ material by anyone who conveys the material (or modified versions of
384
+ it) with contractual assumptions of liability to the recipient, for
385
+ any liability that these contractual assumptions directly impose on
386
+ those licensors and authors.
387
+
388
+ All other non-permissive additional terms are considered "further
389
+ restrictions" within the meaning of section 10. If the Program as you
390
+ received it, or any part of it, contains a notice stating that it is
391
+ governed by this License along with a term that is a further
392
+ restriction, you may remove that term. If a license document contains
393
+ a further restriction but permits relicensing or conveying under this
394
+ License, you may add to a covered work material governed by the terms
395
+ of that license document, provided that the further restriction does
396
+ not survive such relicensing or conveying.
397
+
398
+ If you add terms to a covered work in accord with this section, you
399
+ must place, in the relevant source files, a statement of the
400
+ additional terms that apply to those files, or a notice indicating
401
+ where to find the applicable terms.
402
+
403
+ Additional terms, permissive or non-permissive, may be stated in the
404
+ form of a separately written license, or stated as exceptions;
405
+ the above requirements apply either way.
406
+
407
+ 8. Termination.
408
+
409
+ You may not propagate or modify a covered work except as expressly
410
+ provided under this License. Any attempt otherwise to propagate or
411
+ modify it is void, and will automatically terminate your rights under
412
+ this License (including any patent licenses granted under the third
413
+ paragraph of section 11).
414
+
415
+ However, if you cease all violation of this License, then your
416
+ license from a particular copyright holder is reinstated (a)
417
+ provisionally, unless and until the copyright holder explicitly and
418
+ finally terminates your license, and (b) permanently, if the copyright
419
+ holder fails to notify you of the violation by some reasonable means
420
+ prior to 60 days after the cessation.
421
+
422
+ Moreover, your license from a particular copyright holder is
423
+ reinstated permanently if the copyright holder notifies you of the
424
+ violation by some reasonable means, this is the first time you have
425
+ received notice of violation of this License (for any work) from that
426
+ copyright holder, and you cure the violation prior to 30 days after
427
+ your receipt of the notice.
428
+
429
+ Termination of your rights under this section does not terminate the
430
+ licenses of parties who have received copies or rights from you under
431
+ this License. If your rights have been terminated and not permanently
432
+ reinstated, you do not qualify to receive new licenses for the same
433
+ material under section 10.
434
+
435
+ 9. Acceptance Not Required for Having Copies.
436
+
437
+ You are not required to accept this License in order to receive or
438
+ run a copy of the Program. Ancillary propagation of a covered work
439
+ occurring solely as a consequence of using peer-to-peer transmission
440
+ to receive a copy likewise does not require acceptance. However,
441
+ nothing other than this License grants you permission to propagate or
442
+ modify any covered work. These actions infringe copyright if you do
443
+ not accept this License. Therefore, by modifying or propagating a
444
+ covered work, you indicate your acceptance of this License to do so.
445
+
446
+ 10. Automatic Licensing of Downstream Recipients.
447
+
448
+ Each time you convey a covered work, the recipient automatically
449
+ receives a license from the original licensors, to run, modify and
450
+ propagate that work, subject to this License. You are not responsible
451
+ for enforcing compliance by third parties with this License.
452
+
453
+ An "entity transaction" is a transaction transferring control of an
454
+ organization, or substantially all assets of one, or subdividing an
455
+ organization, or merging organizations. If propagation of a covered
456
+ work results from an entity transaction, each party to that
457
+ transaction who receives a copy of the work also receives whatever
458
+ licenses to the work the party's predecessor in interest had or could
459
+ give under the previous paragraph, plus a right to possession of the
460
+ Corresponding Source of the work from the predecessor in interest, if
461
+ the predecessor has it or can get it with reasonable efforts.
462
+
463
+ You may not impose any further restrictions on the exercise of the
464
+ rights granted or affirmed under this License. For example, you may
465
+ not impose a license fee, royalty, or other charge for exercise of
466
+ rights granted under this License, and you may not initiate litigation
467
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
468
+ any patent claim is infringed by making, using, selling, offering for
469
+ sale, or importing the Program or any portion of it.
470
+
471
+ 11. Patents.
472
+
473
+ A "contributor" is a copyright holder who authorizes use under this
474
+ License of the Program or a work on which the Program is based. The
475
+ work thus licensed is called the contributor's "contributor version".
476
+
477
+ A contributor's "essential patent claims" are all patent claims
478
+ owned or controlled by the contributor, whether already acquired or
479
+ hereafter acquired, that would be infringed by some manner, permitted
480
+ by this License, of making, using, or selling its contributor version,
481
+ but do not include claims that would be infringed only as a
482
+ consequence of further modification of the contributor version. For
483
+ purposes of this definition, "control" includes the right to grant
484
+ patent sublicenses in a manner consistent with the requirements of
485
+ this License.
486
+
487
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
488
+ patent license under the contributor's essential patent claims, to
489
+ make, use, sell, offer for sale, import and otherwise run, modify and
490
+ propagate the contents of its contributor version.
491
+
492
+ In the following three paragraphs, a "patent license" is any express
493
+ agreement or commitment, however denominated, not to enforce a patent
494
+ (such as an express permission to practice a patent or covenant not to
495
+ sue for patent infringement). To "grant" such a patent license to a
496
+ party means to make such an agreement or commitment not to enforce a
497
+ patent against the party.
498
+
499
+ If you convey a covered work, knowingly relying on a patent license,
500
+ and the Corresponding Source of the work is not available for anyone
501
+ to copy, free of charge and under the terms of this License, through a
502
+ publicly available network server or other readily accessible means,
503
+ then you must either (1) cause the Corresponding Source to be so
504
+ available, or (2) arrange to deprive yourself of the benefit of the
505
+ patent license for this particular work, or (3) arrange, in a manner
506
+ consistent with the requirements of this License, to extend the patent
507
+ license to downstream recipients. "Knowingly relying" means you have
508
+ actual knowledge that, but for the patent license, your conveying the
509
+ covered work in a country, or your recipient's use of the covered work
510
+ in a country, would infringe one or more identifiable patents in that
511
+ country that you have reason to believe are valid.
512
+
513
+ If, pursuant to or in connection with a single transaction or
514
+ arrangement, you convey, or propagate by procuring conveyance of, a
515
+ covered work, and grant a patent license to some of the parties
516
+ receiving the covered work authorizing them to use, propagate, modify
517
+ or convey a specific copy of the covered work, then the patent license
518
+ you grant is automatically extended to all recipients of the covered
519
+ work and works based on it.
520
+
521
+ A patent license is "discriminatory" if it does not include within
522
+ the scope of its coverage, prohibits the exercise of, or is
523
+ conditioned on the non-exercise of one or more of the rights that are
524
+ specifically granted under this License. You may not convey a covered
525
+ work if you are a party to an arrangement with a third party that is
526
+ in the business of distributing software, under which you make payment
527
+ to the third party based on the extent of your activity of conveying
528
+ the work, and under which the third party grants, to any of the
529
+ parties who would receive the covered work from you, a discriminatory
530
+ patent license (a) in connection with copies of the covered work
531
+ conveyed by you (or copies made from those copies), or (b) primarily
532
+ for and in connection with specific products or compilations that
533
+ contain the covered work, unless you entered into that arrangement,
534
+ or that patent license was granted, prior to 28 March 2007.
535
+
536
+ Nothing in this License shall be construed as excluding or limiting
537
+ any implied license or other defenses to infringement that may
538
+ otherwise be available to you under applicable patent law.
539
+
540
+ 12. No Surrender of Others' Freedom.
541
+
542
+ If conditions are imposed on you (whether by court order, agreement or
543
+ otherwise) that contradict the conditions of this License, they do not
544
+ excuse you from the conditions of this License. If you cannot convey a
545
+ covered work so as to satisfy simultaneously your obligations under this
546
+ License and any other pertinent obligations, then as a consequence you may
547
+ not convey it at all. For example, if you agree to terms that obligate you
548
+ to collect a royalty for further conveying from those to whom you convey
549
+ the Program, the only way you could satisfy both those terms and this
550
+ License would be to refrain entirely from conveying the Program.
551
+
552
+ 13. Use with the GNU Affero General Public License.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU Affero General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the special requirements of the GNU Affero General Public License,
560
+ section 13, concerning interaction through a network will apply to the
561
+ combination as such.
562
+
563
+ 14. Revised Versions of this License.
564
+
565
+ The Free Software Foundation may publish revised and/or new versions of
566
+ the GNU General Public License from time to time. Such new versions will
567
+ be similar in spirit to the present version, but may differ in detail to
568
+ address new problems or concerns.
569
+
570
+ Each version is given a distinguishing version number. If the
571
+ Program specifies that a certain numbered version of the GNU General
572
+ Public License "or any later version" applies to it, you have the
573
+ option of following the terms and conditions either of that numbered
574
+ version or of any later version published by the Free Software
575
+ Foundation. If the Program does not specify a version number of the
576
+ GNU General Public License, you may choose any version ever published
577
+ by the Free Software Foundation.
578
+
579
+ If the Program specifies that a proxy can decide which future
580
+ versions of the GNU General Public License can be used, that proxy's
581
+ public statement of acceptance of a version permanently authorizes you
582
+ to choose that version for the Program.
583
+
584
+ Later license versions may give you additional or different
585
+ permissions. However, no additional obligations are imposed on any
586
+ author or copyright holder as a result of your choosing to follow a
587
+ later version.
588
+
589
+ 15. Disclaimer of Warranty.
590
+
591
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
+
600
+ 16. Limitation of Liability.
601
+
602
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
+ SUCH DAMAGES.
611
+
612
+ 17. Interpretation of Sections 15 and 16.
613
+
614
+ If the disclaimer of warranty and limitation of liability provided
615
+ above cannot be given local legal effect according to their terms,
616
+ reviewing courts shall apply local law that most closely approximates
617
+ an absolute waiver of all civil liability in connection with the
618
+ Program, unless a warranty or assumption of liability accompanies a
619
+ copy of the Program in return for a fee.
620
+
621
+ END OF TERMS AND CONDITIONS
622
+
623
+ How to Apply These Terms to Your New Programs
624
+
625
+ If you develop a new program, and you want it to be of the greatest
626
+ possible use to the public, the best way to achieve this is to make it
627
+ free software which everyone can redistribute and change under these terms.
628
+
629
+ To do so, attach the following notices to the program. It is safest
630
+ to attach them to the start of each source file to most effectively
631
+ state the exclusion of warranty; and each file should have at least
632
+ the "copyright" line and a pointer to where the full notice is found.
633
+
634
+ <one line to give the program's name and a brief idea of what it does.>
635
+ Copyright (C) <year> <name of author>
636
+
637
+ This program is free software: you can redistribute it and/or modify
638
+ it under the terms of the GNU General Public License as published by
639
+ the Free Software Foundation, either version 3 of the License, or
640
+ (at your option) any later version.
641
+
642
+ This program is distributed in the hope that it will be useful,
643
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
644
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
+ GNU General Public License for more details.
646
+
647
+ You should have received a copy of the GNU General Public License
648
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
649
+
650
+ Also add information on how to contact you by electronic and paper mail.
651
+
652
+ If the program does terminal interaction, make it output a short
653
+ notice like this when it starts in an interactive mode:
654
+
655
+ <program> Copyright (C) <year> <name of author>
656
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657
+ This is free software, and you are welcome to redistribute it
658
+ under certain conditions; type `show c' for details.
659
+
660
+ The hypothetical commands `show w' and `show c' should show the appropriate
661
+ parts of the General Public License. Of course, your program's commands
662
+ might be different; for a GUI interface, you would use an "about box".
663
+
664
+ You should also get your employer (if you work as a programmer) or school,
665
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
666
+ For more information on this, and how to apply and follow the GNU GPL, see
667
+ <https://www.gnu.org/licenses/>.
668
+
669
+ The GNU General Public License does not permit incorporating your program
670
+ into proprietary programs. If your program is a subroutine library, you
671
+ may consider it more useful to permit linking proprietary applications with
672
+ the library. If this is what you want to do, use the GNU Lesser General
673
+ Public License instead of this License. But first, please read
674
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
bert_vits2/README.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Bert-VITS2
2
+
3
+ VITS2 Backbone with bert
4
+ ## 成熟的旅行者/开拓者/舰长/博士/sensei/猎魔人/喵喵露/V应该参阅代码自己学习如何训练。
5
+ ### 严禁将此项目用于一切违反《中华人民共和国宪法》,《中华人民共和国刑法》,《中华人民共和国治安管理处罚法》和《中华人民共和国民法典》之用途。
bert_vits2/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from bert_vits2.bert_vits2 import Bert_VITS2
2
+ from bert_vits2 import text
bert_vits2/attentions.py ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+ from bert_vits2 import commons
6
+ from torch.nn.utils import weight_norm, remove_weight_norm
7
+
8
+
9
class LayerNorm(nn.Module):
    """Layer normalization over the channel dimension of [b, channels, t] tensors."""

    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        # Learnable affine parameters (scale / shift), one per channel.
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        # F.layer_norm normalizes the trailing dim, so move channels last and back.
        y = F.layer_norm(x.transpose(1, -1), (self.channels,), self.gamma, self.beta, self.eps)
        return y.transpose(1, -1)
22
+
23
+
24
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """Gated activation: tanh of the top half times sigmoid of the bottom half
    of (input_a + input_b), split at n_channels[0] along dim 1."""
    half = n_channels[0]
    summed = input_a + input_b
    return torch.tanh(summed[:, :half, :]) * torch.sigmoid(summed[:, half:, :])
32
+
33
+
34
class Encoder(nn.Module):
    """Transformer encoder: n_layers of windowed relative-position attention + FFN.

    With isflow=True (the default), extra conditioning convolutions are built and,
    when gin_channels is passed via kwargs, a speaker embedding `g` is injected
    before layer `cond_layer_idx` (vits2 uses the 3rd block, i.e. index 2).
    """

    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4,
                 isflow=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size
        if isflow:
            # NOTE(review): cond_pre/cond_layer are created here but not used in
            # forward() below — presumably consumed by an external flow module; confirm.
            cond_layer = torch.nn.Conv1d(256, 2 * hidden_channels * n_layers, 1)
            self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
            self.cond_layer = weight_norm(cond_layer, name='weight')
            self.gin_channels = 256
        # Default sentinel: n_layers means "never inject g" unless overridden below.
        self.cond_layer_idx = self.n_layers
        if 'gin_channels' in kwargs:
            self.gin_channels = kwargs['gin_channels']
            if self.gin_channels != 0:
                self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
                # vits2 says 3rd block, so idx is 2 by default
                self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2
                # print(self.gin_channels, self.cond_layer_idx)
                assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'
        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout,
                                                       window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, g=None):
        """x: [b, hidden, t]; x_mask: [b, 1, t]; g: optional speaker embedding.

        Post-norm residual blocks; the mask is re-applied after each block.
        """
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            if i == self.cond_layer_idx and g is not None:
                # Project speaker embedding to hidden size and add it once.
                g = self.spk_emb_linear(g.transpose(1, 2))
                g = g.transpose(1, 2)
                x = x + g
                x = x * x_mask
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x
90
+
91
+
92
class Decoder(nn.Module):
    """Transformer decoder: causal self-attention, encoder-decoder attention, FFN
    per layer, each with a post-norm residual connection."""

    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.,
                 proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            # Self-attention may use proximal bias/init; cross-attention does not.
            self.self_attn_layers.append(
                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout,
                                   proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(
                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        # Lower-triangular mask makes self-attention causal.
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x
146
+
147
+
148
class MultiHeadAttention(nn.Module):
    """Multi-head attention with optional learned relative-position embeddings
    (window_size), proximal bias, and local (block) attention — as in VITS/glow-tts.

    Inputs/outputs are channel-first: [b, channels, t].
    """

    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True,
                 block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        # Last attention map, cached by forward() for inspection.
        self.attn = None

        self.k_channels = channels // n_heads
        # 1x1 convolutions act as per-timestep linear projections.
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            # Relative-position embeddings for keys/values, spanning [-window, +window].
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels ** -0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            # Start keys equal to queries so initial attention is near-diagonal.
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        """x: query source [b, d, t_t]; c: key/value source [b, d, t_s]."""
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            # -1e4 (not -inf) keeps fp16-safe masking.
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        """Pad/slice the [*, 2*window+1, d_k] table to the 2*length-1 offsets needed."""
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # padd along column
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
303
+
304
+
305
class FFN(nn.Module):
    """Position-wise feed-forward block (two Conv1d layers) with masked padding.

    `causal=True` left-pads so no position sees the future; otherwise symmetric
    'same' padding is used. Activation is ReLU, or an approximate GELU
    (x * sigmoid(1.702 x)) when activation == "gelu".
    """

    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None,
                 causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        # Choose padding strategy once at construction time.
        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        """x: [b, in_channels, t]; x_mask: [b, 1, t]; returns [b, out_channels, t]."""
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            # Sigmoid-based GELU approximation.
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        # Left-pad only: output at t depends on inputs <= t.
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        # Symmetric padding so output length equals input length.
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
bert_vits2/bert/chinese-roberta-wwm-ext-large/.gitattributes ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
bert_vits2/bert/chinese-roberta-wwm-ext-large/README.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - zh
4
+ tags:
5
+ - bert
6
+ license: "apache-2.0"
7
+ ---
8
+
9
+ # Please use 'Bert' related functions to load this model!
10
+
11
+ ## Chinese BERT with Whole Word Masking
12
+ For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
13
+
14
+ **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
15
+ Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
16
+
17
+ This repository is developed based on: https://github.com/google-research/bert
18
+
19
+ You may also be interested in:
20
+ - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
21
+ - Chinese MacBERT: https://github.com/ymcui/MacBERT
22
+ - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
23
+ - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
24
+ - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
25
+
26
+ More resources by HFL: https://github.com/ymcui/HFL-Anthology
27
+
28
+ ## Citation
29
+ If you find the technical report or resource is useful, please cite the following technical report in your paper.
30
+ - Primary: https://arxiv.org/abs/2004.13922
31
+ ```
32
+ @inproceedings{cui-etal-2020-revisiting,
33
+ title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
34
+ author = "Cui, Yiming and
35
+ Che, Wanxiang and
36
+ Liu, Ting and
37
+ Qin, Bing and
38
+ Wang, Shijin and
39
+ Hu, Guoping",
40
+ booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
41
+ month = nov,
42
+ year = "2020",
43
+ address = "Online",
44
+ publisher = "Association for Computational Linguistics",
45
+ url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
46
+ pages = "657--668",
47
+ }
48
+ ```
49
+ - Secondary: https://arxiv.org/abs/1906.08101
50
+ ```
51
+ @article{chinese-bert-wwm,
52
+ title={Pre-Training with Whole Word Masking for Chinese BERT},
53
+ author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
54
+ journal={arXiv preprint arXiv:1906.08101},
55
+ year={2019}
56
+ }
57
+ ```
bert_vits2/bert/chinese-roberta-wwm-ext-large/added_tokens.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
bert_vits2/bert/chinese-roberta-wwm-ext-large/config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "BertForMaskedLM"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "directionality": "bidi",
8
+ "eos_token_id": 2,
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 1024,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 4096,
14
+ "layer_norm_eps": 1e-12,
15
+ "max_position_embeddings": 512,
16
+ "model_type": "bert",
17
+ "num_attention_heads": 16,
18
+ "num_hidden_layers": 24,
19
+ "output_past": true,
20
+ "pad_token_id": 0,
21
+ "pooler_fc_size": 768,
22
+ "pooler_num_attention_heads": 12,
23
+ "pooler_num_fc_layers": 3,
24
+ "pooler_size_per_head": 128,
25
+ "pooler_type": "first_token_transform",
26
+ "type_vocab_size": 2,
27
+ "vocab_size": 21128
28
+ }
chinese_dialect_lexicons/zaonhe_2.ocd2 → bert_vits2/bert/chinese-roberta-wwm-ext-large/flax_model.msgpack RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a71b5a97eb49699f440137391565d208ea82156f0765986b7f3e16909e15672e
3
- size 4095228
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a46a510fe646213c728b80c9d0d5691d05235523d67f9ac3c3ce4e67deabf926
3
+ size 1302196529
bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ac62d49144d770c5ca9a5d1d3039c4995665a080febe63198189857c6bd11cd
3
+ size 1306484351
bert_vits2/bert/chinese-roberta-wwm-ext-large/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
bert_vits2/bert/chinese-roberta-wwm-ext-large/tf_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72d18616fb285b720cb869c25aa9f4d7371033dfd5d8ba82aca448fdd28132bf
3
+ size 1302594480
bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"init_inputs": []}
bert_vits2/bert/chinese-roberta-wwm-ext-large/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert_vits2/bert_vits2.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from bert_vits2 import utils, commons
5
+ from bert_vits2.models import SynthesizerTrn
6
+ from bert_vits2.text import symbols, cleaned_text_to_sequence, get_bert
7
+ from bert_vits2.text.cleaner import clean_text
8
+ from utils.nlp import sentence_split, cut
9
+
10
+
11
class Bert_VITS2:
    """Inference wrapper around a Bert-VITS2 SynthesizerTrn checkpoint.

    Loads hyperparameters from `config`, builds the synthesizer on `device`,
    and exposes text -> audio synthesis (Chinese only: language is hard-coded
    to "ZH" in infer()).
    """

    def __init__(self, model, config, device=torch.device("cpu")):
        self.hps_ms = utils.get_hparams_from_file(config)
        self.n_speakers = getattr(self.hps_ms.data, 'n_speakers', 0)
        # Speaker names ordered by their id in the spk2id mapping.
        self.speakers = [item[0] for item in
                         sorted(list(getattr(self.hps_ms.data, 'spk2id', {'0': 0}).items()), key=lambda x: x[1])]
        self.net_g = SynthesizerTrn(
            len(symbols),
            self.hps_ms.data.filter_length // 2 + 1,
            self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
            n_speakers=self.hps_ms.data.n_speakers,
            **self.hps_ms.model).to(device)
        _ = self.net_g.eval()
        self.device = device
        self.load_model(model)

    def load_model(self, model):
        # Inference only: optimizer state in the checkpoint is skipped.
        utils.load_checkpoint(model, self.net_g, None, skip_optimizer=True)

    def get_speakers(self):
        """Return speaker names ordered by id."""
        return self.speakers

    def get_text(self, text, language_str, hps):
        """Convert raw text into (bert, phone, tone, language) model inputs."""
        norm_text, phone, tone, word2ph = clean_text(text, language_str)
        # print([f"{p}{t}" for p, t in zip(phone, tone)])
        phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

        if hps.data.add_blank:
            # Interleave blanks between tokens; word2ph is doubled to stay aligned.
            phone = commons.intersperse(phone, 0)
            tone = commons.intersperse(tone, 0)
            language = commons.intersperse(language, 0)
            for i in range(len(word2ph)):
                word2ph[i] = word2ph[i] * 2
            word2ph[0] += 1
        bert = get_bert(norm_text, word2ph, language_str)

        # BERT features must align one-to-one with the phone sequence.
        assert bert.shape[-1] == len(phone)

        phone = torch.LongTensor(phone)
        tone = torch.LongTensor(tone)
        language = torch.LongTensor(language)

        return bert, phone, tone, language

    def infer(self, text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
        """Synthesize one sentence; returns a float numpy waveform."""
        bert, phones, tones, lang_ids = self.get_text(text, "ZH", self.hps_ms)
        with torch.no_grad():
            x_tst = phones.to(self.device).unsqueeze(0)
            tones = tones.to(self.device).unsqueeze(0)
            lang_ids = lang_ids.to(self.device).unsqueeze(0)
            bert = bert.to(self.device).unsqueeze(0)
            x_tst_lengths = torch.LongTensor([phones.size(0)]).to(self.device)
            speakers = torch.LongTensor([int(sid)]).to(self.device)
            audio = self.net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio
                                     , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[
                0][0, 0].data.cpu().float().numpy()

        # Free GPU activations between requests (no-op on CPU).
        torch.cuda.empty_cache()
        return audio

    def get_audio(self, voice, auto_break=False):
        """Synthesize a full request dict; long text is cut into chunks and concatenated."""
        text = voice.get("text", None)
        sdp_ratio = voice.get("sdp_ratio", 0.2)
        noise_scale = voice.get("noise", 0.5)
        noise_scale_w = voice.get("noisew", 0.6)
        length_scale = voice.get("length", 1)
        sid = voice.get("id", 0)
        # NOTE(review): local name `max` shadows the builtin for the rest of this method.
        max = voice.get("max", 50)
        # sentence_list = sentence_split(text, max, "ZH", ["zh"])
        sentence_list = cut(text, max)
        audios = []
        for sentence in sentence_list:
            audio = self.infer(sentence, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid)
            audios.append(audio)
        audio = np.concatenate(audios)
        return audio
bert_vits2/commons.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+
8
def init_weights(m, mean=0.0, std=0.01):
    """Initialize Conv* module weights from N(mean, std); other modules untouched."""
    if "Conv" in m.__class__.__name__:
        m.weight.data.normal_(mean, std)
12
+
13
+
14
def get_padding(kernel_size, dilation=1):
    """'Same' padding size for an odd kernel at the given dilation."""
    return (kernel_size - 1) * dilation // 2
16
+
17
+
18
def convert_pad_shape(pad_shape):
    """Flatten a per-dim pad spec [[l, r], ...] (outermost dim first) into the
    reversed flat list that torch.nn.functional.pad expects."""
    return [amount for pair in reversed(pad_shape) for amount in pair]
22
+
23
+
24
def intersperse(lst, item):
    """Return a new list with `item` before, between, and after every element."""
    out = [item]
    for elem in lst:
        out.append(elem)
        out.append(item)
    return out
28
+
29
+
30
def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q) between diagonal Gaussians given means and log-stddevs, elementwise."""
    scale_term = 0.5 * (torch.exp(2. * logs_p) + (m_p - m_q) ** 2) * torch.exp(-2. * logs_q)
    return (logs_q - logs_p) - 0.5 + scale_term
35
+
36
+
37
def rand_gumbel(shape):
    """Sample Gumbel(0, 1) noise; uniforms are kept away from 0/1 to avoid log overflow."""
    u = 0.99998 * torch.rand(shape) + 0.00001
    return -torch.log(-torch.log(u))
41
+
42
+
43
def rand_gumbel_like(x):
    """Gumbel noise matching the shape, dtype, and device of `x`."""
    return rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
46
+
47
+
48
def slice_segments(x, ids_str, segment_size=4):
    """Gather per-batch windows x[i, :, s:s+segment_size] with s = ids_str[i]."""
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i, start in enumerate(ids_str):
        ret[i] = x[i, :, start:start + segment_size]
    return ret
55
+
56
+
57
def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """Pick a uniformly random valid start per batch item; return (segments, starts)."""
    b, _, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    return slice_segments(x, ids_str, segment_size), ids_str
65
+
66
+
67
def get_timing_signal_1d(
        length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Transformer sinusoidal positional signal, shape [1, channels, length].

    First half of channels holds sines, second half cosines; an odd channel
    count is zero-padded by one row.
    """
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (num_timescales - 1))
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_increment)
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    return signal.view(1, channels, length)
81
+
82
+
83
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add a sinusoidal timing signal to x ([b, channels, length])."""
    _, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)
87
+
88
+
89
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    """Concatenate a sinusoidal timing signal to x along `axis` (default: channels)."""
    _, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
93
+
94
+
95
def subsequent_mask(length):
    """Causal attention mask [1, 1, length, length]: ones at/below the diagonal."""
    return torch.tril(torch.ones(length, length)).view(1, 1, length, length)
98
+
99
+
100
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """Gated activation: tanh of the top half times sigmoid of the bottom half
    of (input_a + input_b), split at n_channels[0] along dim 1."""
    half = n_channels[0]
    summed = input_a + input_b
    return torch.tanh(summed[:, :half, :]) * torch.sigmoid(summed[:, half:, :])
108
+
109
+
110
def convert_pad_shape(pad_shape):
    """Flatten [[l, r], ...] pad specs into torch.nn.functional.pad order.

    NOTE(review): exact duplicate of the definition earlier in this module;
    this redefinition shadows it and could be removed.
    """
    return [amount for dims in pad_shape[::-1] for amount in dims]
114
+
115
+
116
def shift_1d(x):
    """Shift x one step right along the last dim, zero-filling the first position."""
    # Pad one zero on the left of the last dim, then drop the last column.
    return F.pad(x, [1, 0, 0, 0, 0, 0])[:, :, :-1]
119
+
120
+
121
def sequence_mask(length, max_length=None):
    """Boolean mask [b, max_length] that is True where position < length[b]."""
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions.unsqueeze(0) < length.unsqueeze(1)
126
+
127
+
128
def generate_path(duration, mask):
    """
    Build a hard monotonic alignment path from integer durations.

    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    Returns a 0/1 path of shape [b, 1, t_y, t_x] where each x step covers its
    duration-many consecutive y frames.
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    # Cumulative end frame of each x step.
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Subtracting the mask shifted by one x step leaves only the frames newly
    # covered at each step, i.e. a band of ones per x position.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path
144
+
145
+
146
def clip_grad_value_(parameters, clip_value, norm_type=2):
    """Clamp gradients in-place to [-clip_value, clip_value] (skipped when
    clip_value is None) and return the pre-clip total gradient norm."""
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total = 0
    for p in grads:
        total += p.grad.data.norm(norm_type).item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    return total ** (1. / norm_type)
bert_vits2/models.py ADDED
@@ -0,0 +1,677 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ from bert_vits2 import commons
7
+ from bert_vits2 import modules
8
+ from bert_vits2 import attentions
9
+
10
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
+
13
+ from bert_vits2.commons import init_weights, get_padding
14
+ from bert_vits2.text import symbols, num_tones, num_languages
15
+
16
+
17
class DurationDiscriminator(nn.Module):  # vits2
    """Duration discriminator (VITS2): scores real vs. predicted durations.

    Given encoder features `x` and a duration sequence, produces a per-frame
    probability in (0, 1) via a sigmoid output head.
    """

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        pad = kernel_size // 2
        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=pad)
        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=pad)
        self.dur_proj = nn.Conv1d(1, filter_channels, 1)

        self.pre_out_conv_1 = nn.Conv1d(2 * filter_channels, filter_channels, kernel_size, padding=pad)
        self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
        self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=pad)
        self.pre_out_norm_2 = modules.LayerNorm(filter_channels)

        self.output_layer = nn.Sequential(
            nn.Linear(filter_channels, 1),
            nn.Sigmoid()
        )

    def forward_probability(self, x, x_mask, dur, g=None):
        # Lift the scalar duration track into the feature space and fuse it
        # with the (already convolved) text features.
        dur = self.dur_proj(dur)
        h = torch.cat([x, dur], dim=1)
        h = self.pre_out_conv_1(h * x_mask)
        h = self.pre_out_conv_2(h * x_mask)
        h = (h * x_mask).transpose(1, 2)
        return self.output_layer(h)

    def forward(self, x, x_mask, dur_r, dur_hat, g=None):
        # Detach so discriminator gradients do not flow into the encoder.
        x = torch.detach(x)
        x = self.conv_1(x * x_mask)
        x = self.conv_2(x * x_mask)

        # One probability map per duration sequence: real first, predicted second.
        return [self.forward_probability(x, x_mask, dur, g) for dur in (dur_r, dur_hat)]
83
+
84
+
85
class TransformerCouplingBlock(nn.Module):
    """Normalizing-flow block built from transformer coupling layers.

    Each of the `n_flows` steps is a TransformerCouplingLayer followed by a
    channel Flip; `reverse=True` inverts the whole stack for sampling.
    """

    def __init__(self,
                 channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 n_flows=4,
                 gin_channels=0,
                 share_parameter=False
                 ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()

        # Optionally share a single FFT attention stack across every coupling layer.
        if share_parameter:
            self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers,
                                     kernel_size, p_dropout, isflow=True,
                                     gin_channels=self.gin_channels)
        else:
            self.wn = None

        for _ in range(n_flows):
            self.flows.append(
                modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size,
                                                 n_layers, n_heads, p_dropout,
                                                 filter_channels, mean_only=True,
                                                 wn_sharing_parameter=self.wn,
                                                 gin_channels=self.gin_channels))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if reverse:
            # Inverse pass: apply flows backwards; inverse flows return only x.
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        else:
            # Forward pass: log-determinants are discarded here.
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        return x
127
+
128
+
129
class StochasticDurationPredictor(nn.Module):
    """Flow-based stochastic duration predictor (from VITS).

    Training mode (``reverse=False``) returns the negative log-likelihood of
    the ground-truth durations ``w`` under the flow, plus the posterior term
    ``logq`` (variational dequantization). Inference mode (``reverse=True``)
    samples log-durations from Gaussian noise through the inverted flows.
    """

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
        super().__init__()
        filter_channels = in_channels  # it needs to be removed from future version.
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        # Main flow stack over the 2-channel duration representation.
        self.log_flow = modules.Log()
        self.flows = nn.ModuleList()
        self.flows.append(modules.ElementwiseAffine(2))
        for i in range(n_flows):
            self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.flows.append(modules.Flip())

        # Posterior network (used only in training) for dequantizing durations.
        self.post_pre = nn.Conv1d(1, filter_channels, 1)
        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        self.post_flows = nn.ModuleList()
        self.post_flows.append(modules.ElementwiseAffine(2))
        for i in range(4):
            self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.post_flows.append(modules.Flip())

        # Conditioning encoder over the (detached) text features.
        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)

    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
        # Detach text features: duration loss must not backprop into the encoder.
        x = torch.detach(x)
        x = self.pre(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.convs(x, x_mask)
        x = self.proj(x) * x_mask

        if not reverse:
            flows = self.flows
            assert w is not None  # ground-truth durations required for NLL

            # --- posterior pass: sample (u, z1) conditioned on x and w ---
            logdet_tot_q = 0
            h_w = self.post_pre(w)
            h_w = self.post_convs(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
            z_q = e_q
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            # Dequantize: subtract u in (0,1) so z0 is continuous.
            z0 = (w - u) * x_mask
            # Jacobian of the sigmoid squashing, accumulated into the posterior logdet.
            logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
            logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q

            # --- prior pass: push (z0, z1) through the main flows ---
            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in flows:
                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
                logdet_tot = logdet_tot + logdet
            # Negative log-likelihood under a standard Gaussian base distribution.
            nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
            return nll + logq  # [b]
        else:
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
            for flow in flows:
                z = flow(z, x_mask, g=x, reverse=reverse)
            z0, z1 = torch.split(z, [1, 1], 1)
            # First channel is the sampled log-duration; z1 is discarded.
            logw = z0
            return logw
208
+
209
+
210
class DurationPredictor(nn.Module):
    """Deterministic duration predictor: two conv+norm+dropout blocks and a
    1-channel projection producing per-token log-durations."""

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        pad = kernel_size // 2
        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=pad)
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=pad)
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        # Detach inputs: the duration loss must not update the text encoder.
        x = torch.detach(x)
        if g is not None:
            x = x + self.cond(torch.detach(g))
        for conv, norm in ((self.conv_1, self.norm_1), (self.conv_2, self.norm_2)):
            h = torch.relu(conv(x * x_mask))
            x = self.drop(norm(h))
        return self.proj(x * x_mask) * x_mask
245
+
246
+
247
class TextEncoder(nn.Module):
    """Encodes phonemes + tones + language ids + BERT features into prior stats.

    Returns the encoder output, the prior mean ``m``, log-std ``logs`` and the
    sequence mask.
    """

    def __init__(self,
                 n_vocab,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 gin_channels=0):
        super().__init__()
        # NOTE(review): the symbol embedding below is sized by len(symbols),
        # not n_vocab — confirm n_vocab is intentionally unused.
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        init_std = hidden_channels ** -0.5
        self.emb = nn.Embedding(len(symbols), hidden_channels)
        nn.init.normal_(self.emb.weight, 0.0, init_std)
        self.tone_emb = nn.Embedding(num_tones, hidden_channels)
        nn.init.normal_(self.tone_emb.weight, 0.0, init_std)
        self.language_emb = nn.Embedding(num_languages, hidden_channels)
        nn.init.normal_(self.language_emb.weight, 0.0, init_std)
        # BERT features arrive with 1024 channels and are projected down.
        self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)

        self.encoder = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            gin_channels=self.gin_channels)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, tone, language, bert, g=None):
        # Sum all embedding streams, then apply the usual sqrt(d) scaling.
        bert_feat = self.bert_proj(bert).transpose(1, 2)
        x = self.emb(x) + self.tone_emb(tone) + self.language_emb(language) + bert_feat
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

        x = self.encoder(x * x_mask, x_mask, g=g)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return x, m, logs, x_mask
298
+
299
+
300
class ResidualCouplingBlock(nn.Module):
    """Stack of WaveNet-style residual coupling flows with channel flips."""

    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for _ in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size,
                                              dilation_rate, n_layers,
                                              gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if reverse:
            # Inverse pass (sampling): flows applied in reverse order.
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        else:
            # Forward pass: log-determinants are discarded.
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        return x
333
+
334
+
335
class PosteriorEncoder(nn.Module):
    """Encodes the linear spectrogram into posterior latents.

    Returns a reparameterized sample ``z`` together with mean ``m``,
    log-std ``logs`` and the sequence mask.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 gin_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        h = self.pre(x) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.proj(h) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        # Reparameterization trick: z = m + eps * sigma.
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask
365
+
366
+
367
class Generator(torch.nn.Module):
    """HiFi-GAN generator: transposed-conv upsampling interleaved with
    multi-receptive-field (MRF) residual blocks, ending in a tanh waveform."""

    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
                 upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        # '1' selects the 3-layer resblock variant, anything else the 2-layer one.
        resblock_cls = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (rate, ksize) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            in_ch = upsample_initial_channel // (2 ** i)
            self.ups.append(weight_norm(
                ConvTranspose1d(in_ch, in_ch // 2, ksize, rate, padding=(ksize - rate) // 2)))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for ksize, dils in zip(resblock_kernel_sizes, resblock_dilation_sizes):
                self.resblocks.append(resblock_cls(ch, ksize, dils))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = self.ups[i](F.leaky_relu(x, modules.LRELU_SLOPE))
            # MRF: average the outputs of the parallel resblocks for this stage.
            base = i * self.num_kernels
            acc = self.resblocks[base](x)
            for j in range(1, self.num_kernels):
                acc = acc + self.resblocks[base + j](x)
            x = acc / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        return torch.tanh(x)

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for layer in self.ups:
            remove_weight_norm(layer)
        for block in self.resblocks:
            block.remove_weight_norm()
421
+
422
+
423
class DiscriminatorP(torch.nn.Module):
    """Period sub-discriminator (HiFi-GAN MPD).

    Reshapes the 1-D waveform into a 2-D map of shape (T // period, period)
    and runs strided 2-D convolutions over it.

    Returns the flattened score tensor and the list of intermediate feature
    maps (used for the feature-matching loss).
    """

    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        # Idiom fix: use the boolean directly instead of comparing `== False`.
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d: reflect-pad so the length is divisible by the period.
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for layer in self.convs:
            x = layer(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
458
+
459
+
460
class DiscriminatorS(torch.nn.Module):
    """Scale sub-discriminator (HiFi-GAN MSD): grouped strided 1-D convs over
    the raw waveform.

    Returns the flattened score tensor and the list of intermediate feature
    maps (used for the feature-matching loss).
    """

    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        # Idiom fix: use the boolean directly instead of comparing `== False`.
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for layer in self.convs:
            x = layer(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
486
+
487
+
488
class MultiPeriodDiscriminator(torch.nn.Module):
    """Ensemble of one scale discriminator plus period discriminators
    (periods 2, 3, 5, 7, 11), run on both real and generated audio."""

    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]

        sub_discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        sub_discs.extend(DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in periods)
        self.discriminators = nn.ModuleList(sub_discs)

    def forward(self, y, y_hat):
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
        for disc in self.discriminators:
            score_real, feats_real = disc(y)
            score_fake, feats_fake = disc(y_hat)
            y_d_rs.append(score_real)
            y_d_gs.append(score_fake)
            fmap_rs.append(feats_real)
            fmap_gs.append(feats_fake)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
511
+
512
+
513
class ReferenceEncoder(nn.Module):
    '''
    inputs --- [N, Ty/r, n_mels*r] mels
    outputs --- [N, ref_enc_gru_size]
    '''

    def __init__(self, spec_channels, gin_channels=0):
        super().__init__()
        self.spec_channels = spec_channels
        ref_enc_filters = [32, 32, 64, 64, 128, 128]
        n_stages = len(ref_enc_filters)
        channel_plan = [1] + ref_enc_filters
        # Six stride-2 conv stages halve both time and frequency axes each step.
        self.convs = nn.ModuleList([
            weight_norm(nn.Conv2d(in_channels=channel_plan[i],
                                  out_channels=channel_plan[i + 1],
                                  kernel_size=(3, 3),
                                  stride=(2, 2),
                                  padding=(1, 1))) for i in range(n_stages)
        ])

        # Frequency-axis width remaining after all strided convolutions.
        post_conv_width = self.calculate_channels(spec_channels, 3, 2, 1, n_stages)
        self.gru = nn.GRU(input_size=ref_enc_filters[-1] * post_conv_width,
                          hidden_size=256 // 2,
                          batch_first=True)
        self.proj = nn.Linear(128, gin_channels)

    def forward(self, inputs, mask=None):
        batch = inputs.size(0)
        out = inputs.view(batch, 1, -1, self.spec_channels)  # [N, 1, Ty, n_freqs]
        for conv in self.convs:
            out = F.relu(conv(out))  # [N, 128, Ty//2^K, n_mels//2^K]

        out = out.transpose(1, 2)  # [N, Ty//2^K, 128, n_mels//2^K]
        frames = out.size(1)
        batch = out.size(0)
        out = out.contiguous().view(batch, frames, -1)  # [N, Ty//2^K, 128*n_mels//2^K]

        self.gru.flatten_parameters()
        _, final_state = self.gru(out)  # final_state --- [1, N, 128]

        return self.proj(final_state.squeeze(0))

    def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
        # Apply the standard conv output-length formula n_convs times.
        for _ in range(n_convs):
            L = (L - kernel_size + 2 * pad) // stride + 1
        return L
562
+
563
+
564
class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training (Bert-VITS2 top-level model).

    Combines the text encoder (prior), posterior encoder, normalizing flow,
    duration predictors and HiFi-GAN decoder. Only the inference path is
    defined in this file.
    """

    def __init__(self,
                 n_vocab,
                 spec_channels,
                 segment_size,
                 inter_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 resblock,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 upsample_rates,
                 upsample_initial_channel,
                 upsample_kernel_sizes,
                 n_speakers=256,
                 gin_channels=256,
                 use_sdp=True,
                 n_flow_layer=4,
                 n_layers_trans_flow=3,
                 flow_share_parameter=False,
                 use_transformer_flow=True,
                 **kwargs):
        super().__init__()
        self.n_vocab = n_vocab
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.n_speakers = n_speakers
        self.gin_channels = gin_channels
        self.n_layers_trans_flow = n_layers_trans_flow
        self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True)
        self.use_sdp = use_sdp
        self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
        self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
        self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)
        self.current_mas_noise_scale = self.mas_noise_scale_initial
        # Fix: always define enc_gin_channels so the TextEncoder construction
        # below cannot raise AttributeError when speaker conditioning is off.
        self.enc_gin_channels = 0
        if self.use_spk_conditioned_encoder and gin_channels > 0:
            self.enc_gin_channels = gin_channels
        self.enc_p = TextEncoder(n_vocab,
                                 inter_channels,
                                 hidden_channels,
                                 filter_channels,
                                 n_heads,
                                 n_layers,
                                 kernel_size,
                                 p_dropout,
                                 gin_channels=self.enc_gin_channels)
        self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes,
                             upsample_rates, upsample_initial_channel, upsample_kernel_sizes,
                             gin_channels=gin_channels)
        self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
                                      gin_channels=gin_channels)
        if use_transformer_flow:
            self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads,
                                                 n_layers_trans_flow, 5, p_dropout, n_flow_layer,
                                                 gin_channels=gin_channels, share_parameter=flow_share_parameter)
        else:
            self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer,
                                              gin_channels=gin_channels)
        self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
        self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)

        # Multi-speaker: lookup table; single/no-speaker: reference encoder on mels.
        if n_speakers > 1:
            self.emb_g = nn.Embedding(n_speakers, gin_channels)
        else:
            self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)

    def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8,
              max_len=None, sdp_ratio=0, y=None):
        """Synthesize audio from text inputs.

        sdp_ratio blends the stochastic and deterministic duration predictors;
        y (reference mel) is required only when no speaker embedding exists.
        """
        # Fix: __init__ only creates emb_g when n_speakers > 1; the original
        # `> 0` check crashed with AttributeError when n_speakers == 1.
        if self.n_speakers > 1:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert, g=g)
        # Blend the two duration predictors in log space.
        logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * sdp_ratio \
               + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)
        w = torch.exp(logw) * x_mask * length_scale
        w_ceil = torch.ceil(w)
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
        attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
        attn = commons.generate_path(w_ceil, attn_mask)

        # Expand prior statistics to frame level via the monotonic alignment.
        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)

        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        z = self.flow(z_p, y_mask, g=g, reverse=True)
        o = self.dec((z * y_mask)[:, :, :max_len], g=g)
        return o, attn, y_mask, (z, z_p, m_p, logs_p)
bert_vits2/modules.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import scipy
5
+ import torch
6
+ from torch import nn
7
+ from torch.nn import functional as F
8
+
9
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
+ from torch.nn.utils import weight_norm, remove_weight_norm
11
+
12
+ from bert_vits2 import commons
13
+ from bert_vits2.commons import init_weights, get_padding
14
+ from bert_vits2.transforms import piecewise_rational_quadratic_transform
15
+ from bert_vits2.attentions import Encoder
16
+
17
+ LRELU_SLOPE = 0.1
18
+
19
+
20
class LayerNorm(nn.Module):
    """Layer normalization over the channel axis of (B, C, ...) tensors."""

    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        # Move channels to the last axis, normalize, then restore the layout.
        y = F.layer_norm(x.transpose(1, -1), (self.channels,), self.gamma, self.beta, self.eps)
        return y.transpose(1, -1)
33
+
34
+
35
class ConvReluNorm(nn.Module):
    """Stack of Conv1d -> LayerNorm -> ReLU -> Dropout layers with a final
    zero-initialized residual projection."""

    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        # Fix: the message now matches the condition actually checked (> 1).
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        # Zero init so the block starts as an identity-plus-nothing residual.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        # Residual connection around the whole stack.
        x = x_org + self.proj(x)
        return x * x_mask
68
+
69
+
70
class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """

    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for layer in range(n_layers):
            # Exponentially growing dilation, "same" padding.
            dilation = kernel_size ** layer
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation,
                                            padding=padding))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        # Optional conditioning is added directly to the input.
        if g is not None:
            x = x + g
        for sep, pointwise, norm_a, norm_b in zip(self.convs_sep, self.convs_1x1,
                                                  self.norms_1, self.norms_2):
            y = F.gelu(norm_a(sep(x * x_mask)))
            y = F.gelu(norm_b(pointwise(y)))
            x = x + self.drop(y)
        return x * x_mask
110
+
111
+
112
class WN(torch.nn.Module):
    """WaveNet-style gated residual network (non-causal) with optional
    global conditioning, as used by the posterior encoder and coupling flows."""

    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert (kernel_size % 2 == 1)
        self.hidden_channels = hidden_channels
        # Fix: the original had a trailing comma here, which silently stored
        # the kernel size as a 1-tuple instead of an int.
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            # One conditioning projection shared by all layers (sliced per layer).
            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                # Slice this layer's share of the shared conditioning projection.
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            # Gated activation: tanh(a) * sigmoid(b), fused in commons.
            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                # First half is the residual, second half joins the skip sum.
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)
186
+
187
+
188
class ResBlock1(torch.nn.Module):
    """HiFi-GAN residual block type 1: three (dilated conv, plain conv) pairs
    with leaky-ReLU activations and additive skip connections."""

    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=d,
                               padding=get_padding(kernel_size, d)))
            for d in dilation
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
            for _ in dilation
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for dilated, plain in zip(self.convs1, self.convs2):
            h = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            h = dilated(h)
            h = F.leaky_relu(h, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            x = plain(h) + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for conv in self.convs1:
            remove_weight_norm(conv)
        for conv in self.convs2:
            remove_weight_norm(conv)
231
+
232
+
233
class ResBlock2(torch.nn.Module):
    """Lighter HiFi-GAN residual block: two dilated convs, no second stack.

    NOTE(review): statement/parameter-creation order kept as-is for state-dict
    and RNG-reproducibility reasons.
    """

    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        """leaky_relu -> conv with a residual add, per conv."""
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        """Remove weight-norm wrappers (call once before inference/export)."""
        for l in self.convs:
            remove_weight_norm(l)
258
+
259
+
260
class Log(nn.Module):
    """Invertible elementwise log flow: y = log(max(x, 1e-5)); inverse is exp."""

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if reverse:
            # Inverse pass: exponentiate, then re-apply the mask.
            return torch.exp(x) * x_mask
        # Forward pass: clamp away non-positive values before taking the log.
        y = x_mask * torch.log(torch.clamp_min(x, 1e-5))
        logdet = (-y).sum(dim=[1, 2])
        return y, logdet
269
+
270
+
271
class Flip(nn.Module):
    """Invertible channel-order reversal; volume preserving (logdet = 0)."""

    def forward(self, x, *args, reverse=False, **kwargs):
        flipped = torch.flip(x, [1])
        if reverse:
            return flipped
        # The flip is a permutation, so its log-determinant is exactly zero.
        zero_logdet = torch.zeros(flipped.size(0)).to(dtype=flipped.dtype, device=flipped.device)
        return flipped, zero_logdet
279
+
280
+
281
class ElementwiseAffine(nn.Module):
    """Invertible per-channel affine flow: y = m + exp(logs) * x (masked).

    Both parameters start at zero, so the layer begins as the identity.
    """

    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if reverse:
            # Inverse affine: undo the shift, then the (log-)scale.
            return (x - self.m) * torch.exp(-self.logs) * x_mask
        y = self.m + torch.exp(self.logs) * x
        y = y * x_mask
        # logdet of a diagonal affine map is the sum of the (masked) log-scales.
        logdet = torch.sum(self.logs * x_mask, [1, 2])
        return y, logdet
297
+
298
+
299
class ResidualCouplingLayer(nn.Module):
    """Affine coupling flow: the first half of the channels parameterizes an
    affine transform of the second half via a WaveNet (WN) encoder.
    """

    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout,
                      gin_channels=gin_channels)
        # mean_only=True emits only means (half as many stats channels).
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        # Zero-init so the coupling starts as an identity transform.
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        """Forward returns (x, logdet); reverse returns only the inverted x."""
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x
346
+
347
+
348
class ConvFlow(nn.Module):
    """Flow layer applying a piecewise rational-quadratic spline to half of the
    channels, with spline parameters predicted by a DDSConv stack.
    """

    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        # Per channel: num_bins widths + num_bins heights + (num_bins-1) derivatives.
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        # Zero-init so the flow starts near the identity.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        """Forward returns (x, logdet); reverse returns only the inverted x."""
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*(3*num_bins-1), t] -> [b, c, t, 3*num_bins-1]

        # Dividing by sqrt(filter_channels) keeps the spline parameters small at init.
        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
                                                               unnormalized_widths,
                                                               unnormalized_heights,
                                                               unnormalized_derivatives,
                                                               inverse=reverse,
                                                               tails='linear',
                                                               tail_bound=self.tail_bound
                                                               )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
393
+
394
+
395
class TransformerCouplingLayer(nn.Module):
    """Coupling flow whose statistics network is a Transformer ``Encoder``
    instead of the WaveNet used by ``ResidualCouplingLayer``.

    Fix: removed an unreachable copy-paste block that followed ``forward``'s
    return (leftover from ``ConvFlow``) and referenced undefined names such as
    ``unnormalized_widths`` and ``self.tail_bound``.
    """

    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 n_layers,
                 n_heads,
                 p_dropout=0,
                 filter_channels=0,
                 mean_only=False,
                 wn_sharing_parameter=None,
                 gin_channels=0
                 ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        # Optionally share the encoder between coupling layers via wn_sharing_parameter.
        self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow=True,
                           gin_channels=gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter
        # mean_only=True emits only means (half as many stats channels).
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        # Zero-init so the coupling starts as an identity transform.
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        """Forward returns (x, logdet); reverse returns only the inverted x."""
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x
bert_vits2/requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Cython
2
+ librosa==0.9.1
3
+ matplotlib==3.3.1
4
+ numpy
5
+ phonemizer
6
+ scipy
7
+ tensorboard
8
+ torch
9
+ torchvision
10
+ Unidecode
11
+ amfm_decompy
12
+ jieba
13
+ transformers
14
+ pypinyin
15
+ cn2an
bert_vits2/text/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bert_vits2.text.symbols import *
2
+ from .chinese_bert import get_bert_feature as zh_bert
3
+ from .english_bert_mock import get_bert_feature as en_bert
4
+
5
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
6
+
7
+
8
def cleaned_text_to_sequence(cleaned_text, tones, language):
    """Map cleaned phoneme strings to symbol IDs, shift tones into the
    language's tone range, and emit a parallel list of language IDs.

    Returns:
        (phone_ids, shifted_tones, lang_ids) — three equal-length lists.
    """
    phone_ids = [_symbol_to_id[s] for s in cleaned_text]
    offset = language_tone_start_map[language]
    shifted_tones = [t + offset for t in tones]
    lang_ids = [language_id_map[language]] * len(phone_ids)
    return phone_ids, shifted_tones, lang_ids
21
+
22
+
23
def get_bert(norm_text, word2ph, language):
    """Dispatch to the language-specific BERT feature extractor."""
    dispatch = {
        'ZH': zh_bert,
        'EN': en_bert
    }
    return dispatch[language](norm_text, word2ph)
bert_vits2/text/chinese.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+
4
+ import cn2an
5
+ from pypinyin import lazy_pinyin, Style
6
+
7
+ from bert_vits2.text.symbols import punctuation
8
+ from bert_vits2.text.tone_sandhi import ToneSandhi
9
+
10
+ current_file_path = os.path.dirname(__file__)
11
+ pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
12
+ open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
13
+
14
+ import jieba.posseg as psg
15
+
16
+ rep_map = {
17
+ ':': ',',
18
+ ';': ',',
19
+ ',': ',',
20
+ '。': '.',
21
+ '!': '!',
22
+ '?': '?',
23
+ '\n': '.',
24
+ "·": ",",
25
+ '、': ",",
26
+ '...': '…',
27
+ '$': '.',
28
+ '“': "'",
29
+ '”': "'",
30
+ '‘': "'",
31
+ '’': "'",
32
+ '(': "'",
33
+ ')': "'",
34
+ '(': "'",
35
+ ')': "'",
36
+ '《': "'",
37
+ '》': "'",
38
+ '【': "'",
39
+ '】': "'",
40
+ '[': "'",
41
+ ']': "'",
42
+ '—': "-",
43
+ '~': "-",
44
+ '~': "-",
45
+ '「': "'",
46
+ '」': "'",
47
+
48
+ }
49
+
50
+ tone_modifier = ToneSandhi()
51
+
52
+
53
+ def replace_punctuation(text):
54
+ text = text.replace("嗯", "恩").replace("呣", "母")
55
+ pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
56
+
57
+ replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
58
+
59
+ replaced_text = re.sub(r'[^\u4e00-\u9fa5' + "".join(punctuation) + r']+', '', replaced_text)
60
+
61
+ return replaced_text
62
+
63
+
64
+ def g2p(text):
65
+ pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
66
+ sentences = [i for i in re.split(pattern, text) if i.strip() != '']
67
+ phones, tones, word2ph = _g2p(sentences)
68
+ assert sum(word2ph) == len(phones)
69
+ assert len(word2ph) == len(text) # Sometimes it will crash,you can add a try-catch.
70
+ phones = ['_'] + phones + ["_"]
71
+ tones = [0] + tones + [0]
72
+ word2ph = [1] + word2ph + [1]
73
+ return phones, tones, word2ph
74
+
75
+
76
+ def _get_initials_finals(word):
77
+ initials = []
78
+ finals = []
79
+ orig_initials = lazy_pinyin(
80
+ word, neutral_tone_with_five=True, style=Style.INITIALS)
81
+ orig_finals = lazy_pinyin(
82
+ word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
83
+ for c, v in zip(orig_initials, orig_finals):
84
+ initials.append(c)
85
+ finals.append(v)
86
+ return initials, finals
87
+
88
+
89
+ def _g2p(segments):
90
+ phones_list = []
91
+ tones_list = []
92
+ word2ph = []
93
+ for seg in segments:
94
+ pinyins = []
95
+ # Replace all English words in the sentence
96
+ seg = re.sub('[a-zA-Z]+', '', seg)
97
+ seg_cut = psg.lcut(seg)
98
+ initials = []
99
+ finals = []
100
+ seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
101
+ for word, pos in seg_cut:
102
+ if pos == 'eng':
103
+ continue
104
+ sub_initials, sub_finals = _get_initials_finals(word)
105
+ sub_finals = tone_modifier.modified_tone(word, pos,
106
+ sub_finals)
107
+ initials.append(sub_initials)
108
+ finals.append(sub_finals)
109
+
110
+ # assert len(sub_initials) == len(sub_finals) == len(word)
111
+ initials = sum(initials, [])
112
+ finals = sum(finals, [])
113
+ #
114
+ for c, v in zip(initials, finals):
115
+ raw_pinyin = c + v
116
+ # NOTE: post process for pypinyin outputs
117
+ # we discriminate i, ii and iii
118
+ if c == v:
119
+ assert c in punctuation
120
+ phone = [c]
121
+ tone = '0'
122
+ word2ph.append(1)
123
+ else:
124
+ v_without_tone = v[:-1]
125
+ tone = v[-1]
126
+
127
+ pinyin = c + v_without_tone
128
+ assert tone in '12345'
129
+
130
+ if c:
131
+ # 多音节
132
+ v_rep_map = {
133
+ "uei": 'ui',
134
+ 'iou': 'iu',
135
+ 'uen': 'un',
136
+ }
137
+ if v_without_tone in v_rep_map.keys():
138
+ pinyin = c + v_rep_map[v_without_tone]
139
+ else:
140
+ # 单音节
141
+ pinyin_rep_map = {
142
+ 'ing': 'ying',
143
+ 'i': 'yi',
144
+ 'in': 'yin',
145
+ 'u': 'wu',
146
+ }
147
+ if pinyin in pinyin_rep_map.keys():
148
+ pinyin = pinyin_rep_map[pinyin]
149
+ else:
150
+ single_rep_map = {
151
+ 'v': 'yu',
152
+ 'e': 'e',
153
+ 'i': 'y',
154
+ 'u': 'w',
155
+ }
156
+ if pinyin[0] in single_rep_map.keys():
157
+ pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
158
+
159
+ assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
160
+ phone = pinyin_to_symbol_map[pinyin].split(' ')
161
+ word2ph.append(len(phone))
162
+
163
+ phones_list += phone
164
+ tones_list += [int(tone)] * len(phone)
165
+ return phones_list, tones_list, word2ph
166
+
167
+
168
+ def text_normalize(text):
169
+ numbers = re.findall(r'\d+(?:\.?\d+)?', text)
170
+ for number in numbers:
171
+ text = text.replace(number, cn2an.an2cn(number), 1)
172
+ text = replace_punctuation(text)
173
+ return text
174
+
175
+
176
+ def get_bert_feature(text, word2ph):
177
+ from bert_vits2.text import chinese_bert
178
+ return chinese_bert.get_bert_feature(text, word2ph)
179
+
180
+
181
+ if __name__ == '__main__':
182
+ from bert_vits2.text import get_bert_feature
183
+
184
+ text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
185
+ text = text_normalize(text)
186
+ print(text)
187
+ phones, tones, word2ph = g2p(text)
188
+ bert = get_bert_feature(text, word2ph)
189
+
190
+ print(phones, tones, word2ph, bert.shape)
191
+
192
+ # # 示例用法
193
+ # text = "这是一个示例文本:,你好!这是一个测试...."
194
+ # print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试
bert_vits2/text/chinese_bert.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import config
2
+ import torch
3
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
4
+ from logger import logger
5
+
6
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
7
+
8
+ try:
9
+ logger.info("Loading chinese-roberta-wwm-ext-large...")
10
+ tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large")
11
+ model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large").to(
12
+ device)
13
+ logger.info("Loading finished.")
14
+ except Exception as e:
15
+ logger.error(e)
16
+ logger.error(f"Please download model from hfl/chinese-roberta-wwm-ext-large.")
17
+
18
+
19
+ def get_bert_feature(text, word2ph):
20
+ with torch.no_grad():
21
+ inputs = tokenizer(text, return_tensors='pt')
22
+ for i in inputs:
23
+ inputs[i] = inputs[i].to(device)
24
+ res = model(**inputs, output_hidden_states=True)
25
+ res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu()
26
+
27
+ assert len(word2ph) == len(text) + 2
28
+ word2phone = word2ph
29
+ phone_level_feature = []
30
+ for i in range(len(word2phone)):
31
+ repeat_feature = res[i].repeat(word2phone[i], 1)
32
+ phone_level_feature.append(repeat_feature)
33
+
34
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
35
+
36
+ return phone_level_feature.T
37
+
38
+
39
+ if __name__ == '__main__':
40
+ # feature = get_bert_feature('你好,我是说的道理。')
41
+ import torch
42
+
43
+ word_level_feature = torch.rand(38, 1024) # 12个词,每个词1024维特征
44
+ word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2,
45
+ 2, 2, 2, 1]
46
+
47
+ # 计算总帧数
48
+ total_frames = sum(word2phone)
49
+ print(word_level_feature.shape)
50
+ print(word2phone)
51
+ phone_level_feature = []
52
+ for i in range(len(word2phone)):
53
+ print(word_level_feature[i].shape)
54
+
55
+ # 对每个词重复word2phone[i]次
56
+ repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
57
+ phone_level_feature.append(repeat_feature)
58
+
59
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
60
+ print(phone_level_feature.shape) # torch.Size([36, 1024])
bert_vits2/text/cleaner.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bert_vits2.text import chinese, cleaned_text_to_sequence
2
+
3
+ language_module_map = {
4
+ 'ZH': chinese
5
+ }
6
+
7
+
8
+ def clean_text(text, language):
9
+ language_module = language_module_map[language]
10
+ norm_text = language_module.text_normalize(text)
11
+ phones, tones, word2ph = language_module.g2p(norm_text)
12
+ return norm_text, phones, tones, word2ph
13
+
14
+
15
+ def clean_text_bert(text, language):
16
+ language_module = language_module_map[language]
17
+ norm_text = language_module.text_normalize(text)
18
+ phones, tones, word2ph = language_module.g2p(norm_text)
19
+ bert = language_module.get_bert_feature(norm_text, word2ph)
20
+ return phones, tones, bert
21
+
22
+
23
+ def text_to_sequence(text, language):
24
+ norm_text, phones, tones, word2ph = clean_text(text, language)
25
+ return cleaned_text_to_sequence(phones, tones, language)
26
+
27
+
28
+ if __name__ == '__main__':
29
+ pass
bert_vits2/text/cmudict.rep ADDED
The diff for this file is too large to render. See raw diff
 
chinese_dialect_lexicons/jyutjyu_2.ocd2 → bert_vits2/text/cmudict_cache.pickle RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:aea11bfe51b184b3f000d20ab49757979b216219203839d2b2e3c1f990a13fa5
3
- size 2432991
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9b21b20325471934ba92f2e4a5976989e7d920caa32e7a286eacb027d197949
3
+ size 6212655
bert_vits2/text/english.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ import os
3
+ import re
4
+ from g2p_en import G2p
5
+
6
+ from bert_vits2.text import symbols
7
+
8
+ current_file_path = os.path.dirname(__file__)
9
+ CMU_DICT_PATH = os.path.join(current_file_path, 'cmudict.rep')
10
+ CACHE_PATH = os.path.join(current_file_path, 'cmudict_cache.pickle')
11
+ _g2p = G2p()
12
+
13
+ arpa = {'AH0', 'S', 'AH1', 'EY2', 'AE2', 'EH0', 'OW2', 'UH0', 'NG', 'B', 'G', 'AY0', 'M', 'AA0', 'F', 'AO0', 'ER2',
14
+ 'UH1', 'IY1', 'AH2', 'DH', 'IY0', 'EY1', 'IH0', 'K', 'N', 'W', 'IY2', 'T', 'AA1', 'ER1', 'EH2', 'OY0', 'UH2',
15
+ 'UW1', 'Z', 'AW2', 'AW1', 'V', 'UW2', 'AA2', 'ER', 'AW0', 'UW0', 'R', 'OW1', 'EH1', 'ZH', 'AE0', 'IH2', 'IH',
16
+ 'Y', 'JH', 'P', 'AY1', 'EY0', 'OY2', 'TH', 'HH', 'D', 'ER0', 'CH', 'AO1', 'AE1', 'AO2', 'OY1', 'AY2', 'IH1',
17
+ 'OW0', 'L', 'SH'}
18
+
19
+
20
+ def post_replace_ph(ph):
21
+ rep_map = {
22
+ ':': ',',
23
+ ';': ',',
24
+ ',': ',',
25
+ '。': '.',
26
+ '!': '!',
27
+ '?': '?',
28
+ '\n': '.',
29
+ "·": ",",
30
+ '、': ",",
31
+ '...': '…',
32
+ 'v': "V"
33
+ }
34
+ if ph in rep_map.keys():
35
+ ph = rep_map[ph]
36
+ if ph in symbols:
37
+ return ph
38
+ if ph not in symbols:
39
+ ph = 'UNK'
40
+ return ph
41
+
42
+
43
+ def read_dict():
44
+ g2p_dict = {}
45
+ start_line = 49
46
+ with open(CMU_DICT_PATH) as f:
47
+ line = f.readline()
48
+ line_index = 1
49
+ while line:
50
+ if line_index >= start_line:
51
+ line = line.strip()
52
+ word_split = line.split(' ')
53
+ word = word_split[0]
54
+
55
+ syllable_split = word_split[1].split(' - ')
56
+ g2p_dict[word] = []
57
+ for syllable in syllable_split:
58
+ phone_split = syllable.split(' ')
59
+ g2p_dict[word].append(phone_split)
60
+
61
+ line_index = line_index + 1
62
+ line = f.readline()
63
+
64
+ return g2p_dict
65
+
66
+
67
+ def cache_dict(g2p_dict, file_path):
68
+ with open(file_path, 'wb') as pickle_file:
69
+ pickle.dump(g2p_dict, pickle_file)
70
+
71
+
72
+ def get_dict():
73
+ if os.path.exists(CACHE_PATH):
74
+ with open(CACHE_PATH, 'rb') as pickle_file:
75
+ g2p_dict = pickle.load(pickle_file)
76
+ else:
77
+ g2p_dict = read_dict()
78
+ cache_dict(g2p_dict, CACHE_PATH)
79
+
80
+ return g2p_dict
81
+
82
+
83
+ eng_dict = get_dict()
84
+
85
+
86
+ def refine_ph(phn):
87
+ tone = 0
88
+ if re.search(r'\d$', phn):
89
+ tone = int(phn[-1]) + 1
90
+ phn = phn[:-1]
91
+ return phn.lower(), tone
92
+
93
+
94
+ def refine_syllables(syllables):
95
+ tones = []
96
+ phonemes = []
97
+ for phn_list in syllables:
98
+ for i in range(len(phn_list)):
99
+ phn = phn_list[i]
100
+ phn, tone = refine_ph(phn)
101
+ phonemes.append(phn)
102
+ tones.append(tone)
103
+ return phonemes, tones
104
+
105
+
106
+ def text_normalize(text):
107
+
108
+ return text
109
+
110
+
111
+ def g2p(text):
112
+ phones = []
113
+ tones = []
114
+ words = re.split(r"([,;.\-\?\!\s+])", text)
115
+ for w in words:
116
+ if w.upper() in eng_dict:
117
+ phns, tns = refine_syllables(eng_dict[w.upper()])
118
+ phones += phns
119
+ tones += tns
120
+ else:
121
+ phone_list = list(filter(lambda p: p != " ", _g2p(w)))
122
+ for ph in phone_list:
123
+ if ph in arpa:
124
+ ph, tn = refine_ph(ph)
125
+ phones.append(ph)
126
+ tones.append(tn)
127
+ else:
128
+ phones.append(ph)
129
+ tones.append(0)
130
+
131
+ word2ph = [1 for i in phones]
132
+
133
+ phones = [post_replace_ph(i) for i in phones]
134
+ return phones, tones, word2ph
135
+
136
+
137
+ if __name__ == "__main__":
138
+ # print(get_dict())
139
+ # print(eng_word_to_phoneme("hello"))
140
+ print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
141
+ # all_phones = set()
142
+ # for k, syllables in eng_dict.items():
143
+ # for group in syllables:
144
+ # for ph in group:
145
+ # all_phones.add(ph)
146
+ # print(all_phones)
bert_vits2/text/english_bert_mock.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
+ def get_bert_feature(norm_text, word2ph):
5
+ return torch.zeros(1024, sum(word2ph))
bert_vits2/text/japanese.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py
2
+ import re
3
+ import sys
4
+
5
+ import pyopenjtalk
6
+
7
+ from bert_vits2.text import symbols
8
+
9
+ # Regular expression matching Japanese without punctuation marks:
10
+ _japanese_characters = re.compile(
11
+ r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
12
+
13
+ # Regular expression matching non-Japanese characters or punctuation marks:
14
+ _japanese_marks = re.compile(
15
+ r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
16
+
17
+ # List of (symbol, Japanese) pairs for marks:
18
+ _symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
19
+ ('%', 'パーセント')
20
+ ]]
21
+
22
+ # List of (consonant, sokuon) pairs:
23
+ _real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
24
+ (r'Q([↑↓]*[kg])', r'k#\1'),
25
+ (r'Q([↑↓]*[tdjʧ])', r't#\1'),
26
+ (r'Q([↑↓]*[sʃ])', r's\1'),
27
+ (r'Q([↑↓]*[pb])', r'p#\1')
28
+ ]]
29
+
30
+ # List of (consonant, hatsuon) pairs:
31
+ _real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
32
+ (r'N([↑↓]*[pbm])', r'm\1'),
33
+ (r'N([↑↓]*[ʧʥj])', r'n^\1'),
34
+ (r'N([↑↓]*[tdn])', r'n\1'),
35
+ (r'N([↑↓]*[kg])', r'ŋ\1')
36
+ ]]
37
+
38
+
39
# Punctuation/symbol normalization applied to each phone after g2p.
# Hoisted to module level so the dict is not rebuilt on every call.
_JA_POST_REP_MAP = {
    ':': ',',
    ';': ',',
    ',': ',',
    '。': '.',
    '!': '!',
    '?': '?',
    '\n': '.',
    "·": ",",
    '、': ",",
    '...': '…',
    'v': "V"
}


def post_replace_ph(ph):
    """Map a raw phone to its canonical symbol, or 'UNK' if unknown.

    Punctuation variants are first normalized via _JA_POST_REP_MAP; anything
    still absent from `symbols` becomes the 'UNK' placeholder.
    (Original tested `in symbols` twice; one membership check suffices.)
    """
    ph = _JA_POST_REP_MAP.get(ph, ph)
    return ph if ph in symbols else 'UNK'
60
+
61
+
62
+ def symbols_to_japanese(text):
63
+ for regex, replacement in _symbols_to_japanese:
64
+ text = re.sub(regex, replacement, text)
65
+ return text
66
+
67
+
68
+ def preprocess_jap(text):
69
+ '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
70
+ text = symbols_to_japanese(text)
71
+ sentences = re.split(_japanese_marks, text)
72
+ marks = re.findall(_japanese_marks, text)
73
+ text = []
74
+ for i, sentence in enumerate(sentences):
75
+ if re.match(_japanese_characters, sentence):
76
+ p = pyopenjtalk.g2p(sentence)
77
+ text += p.split(" ")
78
+
79
+ if i < len(marks):
80
+ text += [marks[i].replace(' ', '')]
81
+ return text
82
+
83
+
84
+ def text_normalize(text):
85
+ return text
86
+
87
+
88
+ def g2p(norm_text):
89
+ phones = preprocess_jap(norm_text)
90
+ phones = [post_replace_ph(i) for i in phones]
91
+
92
+ tones = [0 for i in phones]
93
+ word2ph = [1 for i in phones]
94
+ return phones, tones, word2ph
95
+
96
+
97
+ if __name__ == '__main__':
98
+ for line in open("../../../Downloads/transcript_utf8.txt").readlines():
99
+ text = line.split(":")[1]
100
+ phones, tones, word2ph = g2p(text)
101
+ for p in phones:
102
+ if p == "z":
103
+ print(text, phones)
104
+ sys.exit(0)
bert_vits2/text/opencpop-strict.txt ADDED
@@ -0,0 +1,429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ a AA a
2
+ ai AA ai
3
+ an AA an
4
+ ang AA ang
5
+ ao AA ao
6
+ ba b a
7
+ bai b ai
8
+ ban b an
9
+ bang b ang
10
+ bao b ao
11
+ bei b ei
12
+ ben b en
13
+ beng b eng
14
+ bi b i
15
+ bian b ian
16
+ biao b iao
17
+ bie b ie
18
+ bin b in
19
+ bing b ing
20
+ bo b o
21
+ bu b u
22
+ ca c a
23
+ cai c ai
24
+ can c an
25
+ cang c ang
26
+ cao c ao
27
+ ce c e
28
+ cei c ei
29
+ cen c en
30
+ ceng c eng
31
+ cha ch a
32
+ chai ch ai
33
+ chan ch an
34
+ chang ch ang
35
+ chao ch ao
36
+ che ch e
37
+ chen ch en
38
+ cheng ch eng
39
+ chi ch ir
40
+ chong ch ong
41
+ chou ch ou
42
+ chu ch u
43
+ chua ch ua
44
+ chuai ch uai
45
+ chuan ch uan
46
+ chuang ch uang
47
+ chui ch ui
48
+ chun ch un
49
+ chuo ch uo
50
+ ci c i0
51
+ cong c ong
52
+ cou c ou
53
+ cu c u
54
+ cuan c uan
55
+ cui c ui
56
+ cun c un
57
+ cuo c uo
58
+ da d a
59
+ dai d ai
60
+ dan d an
61
+ dang d ang
62
+ dao d ao
63
+ de d e
64
+ dei d ei
65
+ den d en
66
+ deng d eng
67
+ di d i
68
+ dia d ia
69
+ dian d ian
70
+ diao d iao
71
+ die d ie
72
+ ding d ing
73
+ diu d iu
74
+ dong d ong
75
+ dou d ou
76
+ du d u
77
+ duan d uan
78
+ dui d ui
79
+ dun d un
80
+ duo d uo
81
+ e EE e
82
+ ei EE ei
83
+ en EE en
84
+ eng EE eng
85
+ er EE er
86
+ fa f a
87
+ fan f an
88
+ fang f ang
89
+ fei f ei
90
+ fen f en
91
+ feng f eng
92
+ fo f o
93
+ fou f ou
94
+ fu f u
95
+ ga g a
96
+ gai g ai
97
+ gan g an
98
+ gang g ang
99
+ gao g ao
100
+ ge g e
101
+ gei g ei
102
+ gen g en
103
+ geng g eng
104
+ gong g ong
105
+ gou g ou
106
+ gu g u
107
+ gua g ua
108
+ guai g uai
109
+ guan g uan
110
+ guang g uang
111
+ gui g ui
112
+ gun g un
113
+ guo g uo
114
+ ha h a
115
+ hai h ai
116
+ han h an
117
+ hang h ang
118
+ hao h ao
119
+ he h e
120
+ hei h ei
121
+ hen h en
122
+ heng h eng
123
+ hong h ong
124
+ hou h ou
125
+ hu h u
126
+ hua h ua
127
+ huai h uai
128
+ huan h uan
129
+ huang h uang
130
+ hui h ui
131
+ hun h un
132
+ huo h uo
133
+ ji j i
134
+ jia j ia
135
+ jian j ian
136
+ jiang j iang
137
+ jiao j iao
138
+ jie j ie
139
+ jin j in
140
+ jing j ing
141
+ jiong j iong
142
+ jiu j iu
143
+ ju j v
144
+ jv j v
145
+ juan j van
146
+ jvan j van
147
+ jue j ve
148
+ jve j ve
149
+ jun j vn
150
+ jvn j vn
151
+ ka k a
152
+ kai k ai
153
+ kan k an
154
+ kang k ang
155
+ kao k ao
156
+ ke k e
157
+ kei k ei
158
+ ken k en
159
+ keng k eng
160
+ kong k ong
161
+ kou k ou
162
+ ku k u
163
+ kua k ua
164
+ kuai k uai
165
+ kuan k uan
166
+ kuang k uang
167
+ kui k ui
168
+ kun k un
169
+ kuo k uo
170
+ la l a
171
+ lai l ai
172
+ lan l an
173
+ lang l ang
174
+ lao l ao
175
+ le l e
176
+ lei l ei
177
+ leng l eng
178
+ li l i
179
+ lia l ia
180
+ lian l ian
181
+ liang l iang
182
+ liao l iao
183
+ lie l ie
184
+ lin l in
185
+ ling l ing
186
+ liu l iu
187
+ lo l o
188
+ long l ong
189
+ lou l ou
190
+ lu l u
191
+ luan l uan
192
+ lun l un
193
+ luo l uo
194
+ lv l v
195
+ lve l ve
196
+ ma m a
197
+ mai m ai
198
+ man m an
199
+ mang m ang
200
+ mao m ao
201
+ me m e
202
+ mei m ei
203
+ men m en
204
+ meng m eng
205
+ mi m i
206
+ mian m ian
207
+ miao m iao
208
+ mie m ie
209
+ min m in
210
+ ming m ing
211
+ miu m iu
212
+ mo m o
213
+ mou m ou
214
+ mu m u
215
+ na n a
216
+ nai n ai
217
+ nan n an
218
+ nang n ang
219
+ nao n ao
220
+ ne n e
221
+ nei n ei
222
+ nen n en
223
+ neng n eng
224
+ ni n i
225
+ nian n ian
226
+ niang n iang
227
+ niao n iao
228
+ nie n ie
229
+ nin n in
230
+ ning n ing
231
+ niu n iu
232
+ nong n ong
233
+ nou n ou
234
+ nu n u
235
+ nuan n uan
236
+ nun n un
237
+ nuo n uo
238
+ nv n v
239
+ nve n ve
240
+ o OO o
241
+ ou OO ou
242
+ pa p a
243
+ pai p ai
244
+ pan p an
245
+ pang p ang
246
+ pao p ao
247
+ pei p ei
248
+ pen p en
249
+ peng p eng
250
+ pi p i
251
+ pian p ian
252
+ piao p iao
253
+ pie p ie
254
+ pin p in
255
+ ping p ing
256
+ po p o
257
+ pou p ou
258
+ pu p u
259
+ qi q i
260
+ qia q ia
261
+ qian q ian
262
+ qiang q iang
263
+ qiao q iao
264
+ qie q ie
265
+ qin q in
266
+ qing q ing
267
+ qiong q iong
268
+ qiu q iu
269
+ qu q v
270
+ qv q v
271
+ quan q van
272
+ qvan q van
273
+ que q ve
274
+ qve q ve
275
+ qun q vn
276
+ qvn q vn
277
+ ran r an
278
+ rang r ang
279
+ rao r ao
280
+ re r e
281
+ ren r en
282
+ reng r eng
283
+ ri r ir
284
+ rong r ong
285
+ rou r ou
286
+ ru r u
287
+ rua r ua
288
+ ruan r uan
289
+ rui r ui
290
+ run r un
291
+ ruo r uo
292
+ sa s a
293
+ sai s ai
294
+ san s an
295
+ sang s ang
296
+ sao s ao
297
+ se s e
298
+ sen s en
299
+ seng s eng
300
+ sha sh a
301
+ shai sh ai
302
+ shan sh an
303
+ shang sh ang
304
+ shao sh ao
305
+ she sh e
306
+ shei sh ei
307
+ shen sh en
308
+ sheng sh eng
309
+ shi sh ir
310
+ shou sh ou
311
+ shu sh u
312
+ shua sh ua
313
+ shuai sh uai
314
+ shuan sh uan
315
+ shuang sh uang
316
+ shui sh ui
317
+ shun sh un
318
+ shuo sh uo
319
+ si s i0
320
+ song s ong
321
+ sou s ou
322
+ su s u
323
+ suan s uan
324
+ sui s ui
325
+ sun s un
326
+ suo s uo
327
+ ta t a
328
+ tai t ai
329
+ tan t an
330
+ tang t ang
331
+ tao t ao
332
+ te t e
333
+ tei t ei
334
+ teng t eng
335
+ ti t i
336
+ tian t ian
337
+ tiao t iao
338
+ tie t ie
339
+ ting t ing
340
+ tong t ong
341
+ tou t ou
342
+ tu t u
343
+ tuan t uan
344
+ tui t ui
345
+ tun t un
346
+ tuo t uo
347
+ wa w a
348
+ wai w ai
349
+ wan w an
350
+ wang w ang
351
+ wei w ei
352
+ wen w en
353
+ weng w eng
354
+ wo w o
355
+ wu w u
356
+ xi x i
357
+ xia x ia
358
+ xian x ian
359
+ xiang x iang
360
+ xiao x iao
361
+ xie x ie
362
+ xin x in
363
+ xing x ing
364
+ xiong x iong
365
+ xiu x iu
366
+ xu x v
367
+ xv x v
368
+ xuan x van
369
+ xvan x van
370
+ xue x ve
371
+ xve x ve
372
+ xun x vn
373
+ xvn x vn
374
+ ya y a
375
+ yan y En
376
+ yang y ang
377
+ yao y ao
378
+ ye y E
379
+ yi y i
380
+ yin y in
381
+ ying y ing
382
+ yo y o
383
+ yong y ong
384
+ you y ou
385
+ yu y v
386
+ yv y v
387
+ yuan y van
388
+ yvan y van
389
+ yue y ve
390
+ yve y ve
391
+ yun y vn
392
+ yvn y vn
393
+ za z a
394
+ zai z ai
395
+ zan z an
396
+ zang z ang
397
+ zao z ao
398
+ ze z e
399
+ zei z ei
400
+ zen z en
401
+ zeng z eng
402
+ zha zh a
403
+ zhai zh ai
404
+ zhan zh an
405
+ zhang zh ang
406
+ zhao zh ao
407
+ zhe zh e
408
+ zhei zh ei
409
+ zhen zh en
410
+ zheng zh eng
411
+ zhi zh ir
412
+ zhong zh ong
413
+ zhou zh ou
414
+ zhu zh u
415
+ zhua zh ua
416
+ zhuai zh uai
417
+ zhuan zh uan
418
+ zhuang zh uang
419
+ zhui zh ui
420
+ zhun zh un
421
+ zhuo zh uo
422
+ zi z i0
423
+ zong z ong
424
+ zou z ou
425
+ zu z u
426
+ zuan z uan
427
+ zui z ui
428
+ zun z un
429
+ zuo z uo
bert_vits2/text/symbols.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Shared phoneme, tone and language inventory for the Bert-VITS2 front-end."""

punctuation = ['!', '?', '…', ",", ".", "'", '-']
pu_symbols = [*punctuation, "SP", "UNK"]  # punctuation plus silence / unknown markers
pad = '_'

# chinese
zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h',
              'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n',
              'o', 'ong',
              'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've',
              'vn', 'w', 'x', 'y', 'z', 'zh',
              "AA", "EE", "OO"]
num_zh_tones = 6

# japanese
ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky',
              'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z']
num_ja_tones = 1

# English
en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy',
              'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's',
              'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh']
num_en_tones = 4

# Full symbol table: padding first, then the merged (deduplicated, sorted)
# phoneme set, then punctuation/special tokens — so index 0 is always pad.
normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols))
symbols = [pad] + normal_symbols + pu_symbols
sil_phonemes_ids = [symbols.index(p) for p in pu_symbols]

# Tone ids for all three languages are packed into one contiguous range.
num_tones = num_zh_tones + num_ja_tones + num_en_tones

# language maps
language_id_map = {
    'ZH': 0,
    "JA": 1,
    "EN": 2
}
num_languages = len(language_id_map)

# Offset of each language's first tone id inside the packed tone range.
language_tone_start_map = {
    'ZH': 0,
    "JA": num_zh_tones,
    "EN": num_zh_tones + num_ja_tones
}

if __name__ == '__main__':
    # Show which phonemes Chinese and English have in common.
    print(sorted(set(zh_symbols) & set(en_symbols)))
bert_vits2/text/tone_sandhi.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List
15
+ from typing import Tuple
16
+
17
+ import jieba
18
+ from pypinyin import lazy_pinyin
19
+ from pypinyin import Style
20
+
21
+
22
class ToneSandhi():
    """Rule-based Mandarin tone sandhi, adapted from PaddleSpeech.

    Operates on jieba word segments and their pypinyin finals (Style.FINALS_TONE3,
    e.g. 'ia1' — the trailing digit is the tone). Typical use: first merge
    segments that must be modified together (pre_merge_for_modify), then rewrite
    the tone digits per word (modified_tone).
    """

    def __init__(self):
        # Words whose final syllable is read with the neutral tone (tone 5).
        self.must_neural_tone_words = {
            '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝',
            '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊',
            '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去',
            '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号',
            '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当',
            '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻',
            '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂',
            '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆',
            '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂',
            '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿',
            '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台',
            '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算',
            '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨',
            '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快',
            '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜',
            '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔',
            '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事',
            '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾',
            '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼',
            '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实',
            '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头',
            '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼',
            '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数',
            '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气',
            '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈',
            '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方',
            '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴',
            '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦',
            '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝',
            '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹',
            '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息',
            '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤',
            '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家',
            '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故',
            '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨',
            '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅',
            '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱',
            '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱',
            '扫把', '惦记'
        }
        # Words that superficially match neutral-tone patterns but must NOT be neutralised.
        self.must_not_neural_tone_words = {
            "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎"
        }
        # Punctuation (full- and half-width) that blocks some sandhi rules.
        self.punc = ":,;。?!“”‘’':,;.?!"

    # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041
    # e.g.
    # word: "家里"
    # pos: "s"
    # finals: ['ia1', 'i3']
    def _neural_sandhi(self, word: str, pos: str,
                       finals: List[str]) -> List[str]:
        """Apply neutral-tone (tone 5) rules to *finals* and return them."""

        # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺
        for j, item in enumerate(word):
            if j - 1 >= 0 and item == word[j - 1] and pos[0] in {
                "n", "v", "a"
            } and word not in self.must_not_neural_tone_words:
                finals[j] = finals[j][:-1] + "5"
        ge_idx = word.find("个")
        # sentence-final modal particles are neutral
        if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
            finals[-1] = finals[-1][:-1] + "5"
        # structural particles 的/地/得 are neutral
        elif len(word) >= 1 and word[-1] in "的地得":
            finals[-1] = finals[-1][:-1] + "5"
        # e.g. 走了, 看着, 去过
        # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
        #     finals[-1] = finals[-1][:-1] + "5"
        # plural/diminutive suffixes 们/子 on pronouns and nouns
        elif len(word) > 1 and word[-1] in "们子" and pos in {
            "r", "n"
        } and word not in self.must_not_neural_tone_words:
            finals[-1] = finals[-1][:-1] + "5"
        # e.g. 桌上, 地下, 家里
        elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
            finals[-1] = finals[-1][:-1] + "5"
        # e.g. 上来, 下去
        elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
            finals[-1] = finals[-1][:-1] + "5"
        # "个" used as a measure word (e.g. after a numeral) is neutral
        elif (ge_idx >= 1 and
              (word[ge_idx - 1].isnumeric() or
               word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个':
            finals[ge_idx] = finals[ge_idx][:-1] + "5"
        else:
            if word in self.must_neural_tone_words or word[
                                                      -2:] in self.must_neural_tone_words:
                finals[-1] = finals[-1][:-1] + "5"

        # Re-check each sub-word of a 2-way split against the lexicon.
        word_list = self._split_word(word)
        finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]]
        for i, word in enumerate(word_list):
            # conventional neural in Chinese
            if word in self.must_neural_tone_words or word[
                                                      -2:] in self.must_neural_tone_words:
                finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
        finals = sum(finals_list, [])
        return finals

    def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
        """Tone sandhi for "不": neutral between reduplication, tone 2 before tone 4."""
        # e.g. 看不懂
        if len(word) == 3 and word[1] == "不":
            finals[1] = finals[1][:-1] + "5"
        else:
            for i, char in enumerate(word):
                # "不" before tone4 should be bu2, e.g. 不怕
                if char == "不" and i + 1 < len(word) and finals[i +
                                                                1][-1] == "4":
                    finals[i] = finals[i][:-1] + "2"
        return finals

    def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
        """Tone sandhi for "一": tone depends on context (ordinal, number, next tone)."""
        # "一" in number sequences, e.g. 一零零, 二一零
        if word.find("一") != -1 and all(
                [item.isnumeric() for item in word if item != "一"]):
            return finals
        # "一" between reduplication words shold be yi5, e.g. 看一看
        elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
            finals[1] = finals[1][:-1] + "5"
        # when "一" is ordinal word, it should be yi1
        elif word.startswith("第一"):
            finals[1] = finals[1][:-1] + "1"
        else:
            for i, char in enumerate(word):
                if char == "一" and i + 1 < len(word):
                    # "一" before tone4 should be yi2, e.g. 一段
                    if finals[i + 1][-1] == "4":
                        finals[i] = finals[i][:-1] + "2"
                    # "一" before non-tone4 should be yi4, e.g. 一天
                    else:
                        # if "一" is followed by punctuation it keeps tone 1
                        if word[i + 1] not in self.punc:
                            finals[i] = finals[i][:-1] + "4"
        return finals

    def _split_word(self, word: str) -> List[str]:
        """Split *word* into two sub-words using jieba's shortest search-mode token."""
        word_list = jieba.cut_for_search(word)
        word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
        first_subword = word_list[0]
        first_begin_idx = word.find(first_subword)
        if first_begin_idx == 0:
            second_subword = word[len(first_subword):]
            new_word_list = [first_subword, second_subword]
        else:
            second_subword = word[:-len(first_subword)]
            new_word_list = [second_subword, first_subword]
        return new_word_list

    def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
        """Third-tone sandhi: a tone 3 before another tone 3 becomes tone 2."""
        if len(word) == 2 and self._all_tone_three(finals):
            finals[0] = finals[0][:-1] + "2"
        elif len(word) == 3:
            word_list = self._split_word(word)
            if self._all_tone_three(finals):
                # disyllabic + monosyllabic, e.g. 蒙古/包
                if len(word_list[0]) == 2:
                    finals[0] = finals[0][:-1] + "2"
                    finals[1] = finals[1][:-1] + "2"
                # monosyllabic + disyllabic, e.g. 纸/老虎
                elif len(word_list[0]) == 1:
                    finals[1] = finals[1][:-1] + "2"
            else:
                finals_list = [
                    finals[:len(word_list[0])], finals[len(word_list[0]):]
                ]
                if len(finals_list) == 2:
                    for i, sub in enumerate(finals_list):
                        # e.g. 所有/人
                        if self._all_tone_three(sub) and len(sub) == 2:
                            finals_list[i][0] = finals_list[i][0][:-1] + "2"
                        # e.g. 好/喜欢
                        elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \
                                finals_list[0][-1][-1] == "3":

                            finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
                    finals = sum(finals_list, [])
        # split idiom into two words who's length is 2
        elif len(word) == 4:
            finals_list = [finals[:2], finals[2:]]
            finals = []
            for sub in finals_list:
                if self._all_tone_three(sub):
                    sub[0] = sub[0][:-1] + "2"
                finals += sub

        return finals

    def _all_tone_three(self, finals: List[str]) -> bool:
        """True if every final carries tone 3."""
        return all(x[-1] == "3" for x in finals)

    # merge "不" and the word behind it
    # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error
    def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        new_seg = []
        last_word = ""
        for word, pos in seg:
            if last_word == "不":
                word = last_word + word
            if word != "不":
                new_seg.append((word, pos))
            last_word = word[:]
        if last_word == "不":
            new_seg.append((last_word, 'd'))
            last_word = ""
        return new_seg

    # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听"
    # function 2: merge single "一" and the word behind it
    # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error
    # e.g.
    # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
    # output seg: [['听一听', 'v']]
    def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        new_seg = []
        # function 1
        for i, (word, pos) in enumerate(seg):
            if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][
                0] == seg[i + 1][0] and seg[i - 1][1] == "v":
                new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
            else:
                if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][
                    0] == word and pos == "v":
                    continue
                else:
                    new_seg.append([word, pos])
        seg = new_seg
        new_seg = []
        # function 2
        for i, (word, pos) in enumerate(seg):
            if new_seg and new_seg[-1][0] == "一":
                new_seg[-1][0] = new_seg[-1][0] + word
            else:
                new_seg.append([word, pos])
        return new_seg

    # the first and the second words are all_tone_three
    def _merge_continuous_three_tones(
            self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        new_seg = []
        sub_finals_list = [
            lazy_pinyin(
                word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
            for (word, pos) in seg
        ]
        assert len(sub_finals_list) == len(seg)
        merge_last = [False] * len(seg)
        for i, (word, pos) in enumerate(seg):
            if i - 1 >= 0 and self._all_tone_three(
                    sub_finals_list[i - 1]) and self._all_tone_three(
                sub_finals_list[i]) and not merge_last[i - 1]:
                # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi
                if not self._is_reduplication(seg[i - 1][0]) and len(
                        seg[i - 1][0]) + len(seg[i][0]) <= 3:
                    new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
                    merge_last[i] = True
                else:
                    new_seg.append([word, pos])
            else:
                new_seg.append([word, pos])

        return new_seg

    def _is_reduplication(self, word: str) -> bool:
        """True for two-character reduplications like 奶奶."""
        return len(word) == 2 and word[0] == word[1]

    # the last char of first word and the first char of second word is tone_three
    def _merge_continuous_three_tones_2(
            self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        new_seg = []
        sub_finals_list = [
            lazy_pinyin(
                word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
            for (word, pos) in seg
        ]
        assert len(sub_finals_list) == len(seg)
        merge_last = [False] * len(seg)
        for i, (word, pos) in enumerate(seg):
            if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \
                    merge_last[i - 1]:
                # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi
                if not self._is_reduplication(seg[i - 1][0]) and len(
                        seg[i - 1][0]) + len(seg[i][0]) <= 3:
                    new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
                    merge_last[i] = True
                else:
                    new_seg.append([word, pos])
            else:
                new_seg.append([word, pos])
        return new_seg

    def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Merge erhua "儿" into the preceding segment."""
        new_seg = []
        for i, (word, pos) in enumerate(seg):
            if i - 1 >= 0 and word == "儿" and seg[i - 1][0] != "#":
                new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
            else:
                new_seg.append([word, pos])
        return new_seg

    def _merge_reduplication(
            self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Merge adjacent identical segments (e.g. 看 看 -> 看看)."""
        new_seg = []
        for i, (word, pos) in enumerate(seg):
            if new_seg and word == new_seg[-1][0]:
                new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
            else:
                new_seg.append([word, pos])
        return new_seg

    def pre_merge_for_modify(
            self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Run all merge passes over a jieba (word, pos) segmentation."""
        seg = self._merge_bu(seg)
        try:
            seg = self._merge_yi(seg)
        except:
            print("_merge_yi failed")
        seg = self._merge_reduplication(seg)
        seg = self._merge_continuous_three_tones(seg)
        seg = self._merge_continuous_three_tones_2(seg)
        seg = self._merge_er(seg)
        return seg

    def modified_tone(self, word: str, pos: str,
                      finals: List[str]) -> List[str]:
        """Apply all sandhi rules to one word's finals.

        NOTE(review): rule order matters — 不/一 sandhi run before neutral-tone
        and third-tone sandhi; do not reorder.
        """
        finals = self._bu_sandhi(word, finals)
        finals = self._yi_sandhi(word, finals)
        finals = self._neural_sandhi(word, pos, finals)
        finals = self._three_sandhi(word, finals)
        return finals
bert_vits2/transforms.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.nn import functional as F
3
+
4
+ import numpy as np
5
+
6
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
7
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
8
+ DEFAULT_MIN_DERIVATIVE = 1e-3
9
+
10
+
11
def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
    """Apply a (possibly tail-extended) rational-quadratic spline to `inputs`.

    Dispatches to `rational_quadratic_spline` when `tails` is None, otherwise
    to `unconstrained_rational_quadratic_spline`. Returns (outputs, logabsdet).
    """
    if tails is None:
        # Plain spline: inputs must already lie in the spline's domain.
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        # Tail-handling variant: identity outside [-tail_bound, tail_bound].
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = dict(tails=tails, tail_bound=tail_bound)

    return spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
43
+
44
+
45
def searchsorted(bin_locations, inputs, eps=1e-6):
    """Return the index of the bin each input value falls into.

    NOTE: nudges the last bin edge upward by `eps` IN PLACE so that inputs
    equal to the upper bound still land in the final bin.
    """
    bin_locations[..., -1] += eps
    hits = inputs[..., None] >= bin_locations
    return hits.sum(dim=-1) - 1
51
+
52
+
53
def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    """Rational-quadratic spline inside [-tail_bound, tail_bound], identity outside.

    Only 'linear' tails are implemented: out-of-interval inputs pass through
    unchanged with zero log-determinant. Returns (outputs, logabsdet).
    """
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        # Pad the derivative logits so that, after softplus, the boundary
        # derivatives equal 1 — matching the identity tails at the edges.
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        # Identity map with zero log-det outside the interval.
        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet
93
+
94
+
95
def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    """Monotonic rational-quadratic spline (Durkan et al., Neural Spline Flows).

    Maps `inputs` in [left, right] to [bottom, top] (or the inverse map when
    `inverse=True`) and returns (outputs, logabsdet), where logabsdet is the
    log-abs-determinant of the applied transform (negated for the inverse).
    """
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    # Bin widths: softmax-normalised, floored at min_bin_width, then mapped
    # to cumulative knot positions spanning [left, right].
    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    # Knot derivatives, kept strictly positive via softplus.
    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    # Bin heights: same construction as widths, spanning [bottom, top].
    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    # Locate the bin of each input (by height for inverse, width for forward).
    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    # Gather the per-input bin parameters.
    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        # Invert the rational-quadratic: solve a quadratic a*r^2 + b*r + c = 0
        # for the normalised position r ("theta") within the bin.
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        # Numerically stable quadratic root (Citardauq formula).
        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        # Negated: log-det of the inverse transform.
        return outputs, -logabsdet
    else:
        # Forward: evaluate the rational quadratic at the normalised position.
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
bert_vits2/utils.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import glob
3
+ import sys
4
+ import argparse
5
+ import logging
6
+ import json
7
+ import subprocess
8
+ import numpy as np
9
+ from scipy.io.wavfile import read
10
+ import torch
11
+
12
# Flipped to True once matplotlib has been imported with the Agg backend
# (see plot_spectrogram_to_numpy / plot_alignment_to_numpy).
MATPLOTLIB_FLAG = False

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# NOTE(review): the logging *module* doubles as the default logger here;
# get_logger() later rebinds this global to a real Logger instance.
logger = logging
16
+
17
+
18
def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
    """Load model (and optionally optimizer) state from a checkpoint.

    Args:
        checkpoint_path: path to a torch checkpoint with keys
            'model', 'iteration', 'learning_rate', 'optimizer'.
        model: module (possibly DataParallel-wrapped) to load weights into.
        optimizer: optional optimizer whose state should be restored.
        skip_optimizer: if True, never touch the optimizer state.

    Returns:
        (model, optimizer, learning_rate, iteration)

    Missing or shape-mismatched parameters keep the model's current values
    (a message is printed for each), so partially compatible checkpoints load.
    """
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    # BUGFIX: the original had an `elif optimizer is None` branch that then
    # called optimizer.state_dict() on None — a guaranteed AttributeError.
    # The fallback rebuild only makes sense when an optimizer exists.
    if optimizer is not None and not skip_optimizer and checkpoint_dict.get('optimizer') is not None:
        try:
            optimizer.load_state_dict(checkpoint_dict['optimizer'])
        except ValueError:
            # Param-group mismatch (e.g. fine-tuning a different model):
            # keep the live parameter references, restore hyper-parameters.
            new_opt_dict = optimizer.state_dict()
            new_opt_dict_params = new_opt_dict['param_groups'][0]['params']
            new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups']
            new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params
            optimizer.load_state_dict(new_opt_dict)
    saved_state_dict = checkpoint_dict['model']
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
            assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
        except (KeyError, AssertionError):
            # Missing key or shape mismatch: fall back to the current weight.
            print("error, %s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict, strict=False)
    else:
        model.load_state_dict(new_state_dict, strict=False)
    logger.info("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration
55
+
56
+
57
def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    """Serialize model + optimizer state (plus lr and iteration) to disk."""
    logger.info("Saving model and optimizer state at iteration {} to {}".format(
        iteration, checkpoint_path))
    # Unwrap DataParallel so keys are stored without the 'module.' prefix.
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    payload = {
        'model': state_dict,
        'iteration': iteration,
        'optimizer': optimizer.state_dict(),
        'learning_rate': learning_rate,
    }
    torch.save(payload, checkpoint_path)
68
+
69
+
70
def summarize(writer, global_step, scalars=None, histograms=None, images=None, audios=None, audio_sampling_rate=22050):
    """Write scalar/histogram/image/audio summaries to a TensorBoard writer.

    Args:
        writer: a SummaryWriter-like object.
        global_step: step value recorded with every summary.
        scalars/histograms/images/audios: optional {tag: value} dicts.
        audio_sampling_rate: sample rate passed to add_audio.

    BUGFIX: the original used mutable default arguments ({}); defaults are
    now None, which is behaviorally identical for callers.
    """
    for k, v in (scalars or {}).items():
        writer.add_scalar(k, v, global_step)
    for k, v in (histograms or {}).items():
        writer.add_histogram(k, v, global_step)
    for k, v in (images or {}).items():
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in (audios or {}).items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)
79
+
80
+
81
def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    """Return the checkpoint in `dir_path` carrying the highest step number.

    The key concatenates every digit in the full path, so it assumes all
    candidates live in the same directory (same digit prefix).
    """
    candidates = glob.glob(os.path.join(dir_path, regex))
    candidates.sort(key=lambda name: int("".join(filter(str.isdigit, name))))
    latest = candidates[-1]
    print(latest)
    return latest
87
+
88
+
89
def plot_spectrogram_to_numpy(spectrogram):
    """Render a spectrogram to an RGB numpy image (H, W, 3) uint8.

    Lazily imports matplotlib with the Agg backend on first use (guarded by
    the module-level MATPLOTLIB_FLAG) so headless machines work.
    """
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")  # backend must be set before pyplot is imported
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
                   interpolation='none')
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()

    fig.canvas.draw()
    # BUGFIX: np.fromstring on binary data is deprecated (removed behavior in
    # newer numpy); np.frombuffer is the supported zero-copy replacement.
    # NOTE(review): tostring_rgb is renamed in matplotlib >= 3.8 — verify the
    # pinned matplotlib version still provides it.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close(fig)  # close this figure specifically, not just the current one
    return data
113
+
114
+
115
def plot_alignment_to_numpy(alignment, info=None):
    """Render an (encoder x decoder) alignment matrix to an RGB numpy image.

    Args:
        alignment: 2-D array; it is transposed before plotting.
        info: optional extra text appended to the x-axis label.

    Lazily imports matplotlib with the Agg backend on first use (guarded by
    the module-level MATPLOTLIB_FLAG).
    """
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")  # backend must be set before pyplot is imported
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
                   interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()

    fig.canvas.draw()
    # BUGFIX: np.fromstring on binary data is deprecated; use np.frombuffer.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close(fig)  # close this figure specifically
    return data
142
+
143
+
144
def load_wav_to_torch(full_path):
    """Read a wav file and return (float32 sample tensor, sampling rate)."""
    sampling_rate, samples = read(full_path)
    tensor = torch.FloatTensor(samples.astype(np.float32))
    return tensor, sampling_rate
147
+
148
+
149
def load_filepaths_and_text(filename, split="|"):
    """Parse a delimiter-separated metadata file into a list of field lists."""
    with open(filename, encoding='utf-8') as f:
        return [line.strip().split(split) for line in f]
153
+
154
+
155
def get_hparams(init=True):
    """Parse CLI args (-c config path, -m model name), prepare ./logs/<model>,
    and return the configuration as an HParams object.

    With init=True the given config file is snapshotted into the model dir;
    with init=False the previously saved snapshot is read instead.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
                        help='JSON file for configuration')
    parser.add_argument('-m', '--model', type=str, required=True,
                        help='Model name')

    args = parser.parse_args()
    model_dir = os.path.join("./logs", args.model)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    config_path = args.config
    config_save_path = os.path.join(model_dir, "config.json")
    if init:
        # First run: copy the supplied config into the model directory.
        with open(config_path, "r") as f:
            data = f.read()
        with open(config_save_path, "w") as f:
            f.write(data)
    else:
        # Resume: read the snapshot written by a previous run.
        with open(config_save_path, "r") as f:
            data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams
183
+
184
+
185
def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
    """Free up space by deleting old saved checkpoints.

    Arguments:
        path_to_models -- Path to the model directory
        n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
        sort_by_time -- True -> chronologically delete ckpts
                        False -> lexicographically delete ckpts

    Keeps the newest n_ckpts_to_keep 'G' and 'D' checkpoints; '*_0.pth'
    files are never deleted.
    """
    import re
    ckpts_files = [f for f in os.listdir(path_to_models)
                   if os.path.isfile(os.path.join(path_to_models, f))]

    def name_key(fname):
        # step number embedded in e.g. "G_12345.pth"
        # BUGFIX: raw string — the original '._(\d+)\.pth' emits an
        # invalid-escape-sequence warning on modern Python.
        return int(re.match(r'._(\d+)\.pth', fname).group(1))

    def time_key(fname):
        return os.path.getmtime(os.path.join(path_to_models, fname))

    sort_key = time_key if sort_by_time else name_key

    def sorted_ckpts(prefix):
        return sorted((f for f in ckpts_files
                       if f.startswith(prefix) and not f.endswith('_0.pth')),
                      key=sort_key)

    to_del = [os.path.join(path_to_models, fn)
              for fn in sorted_ckpts('G')[:-n_ckpts_to_keep] + sorted_ckpts('D')[:-n_ckpts_to_keep]]
    # Explicit loop instead of the original side-effecting list comprehension.
    for fn in to_del:
        os.remove(fn)
        logger.info(f".. Free up space by deleting ckpt {fn}")
206
+
207
+
208
def get_hparams_from_dir(model_dir):
    """Load hyper-parameters from <model_dir>/config.json as an HParams object."""
    config_save_path = os.path.join(model_dir, "config.json")
    with open(config_save_path, "r", encoding='utf-8') as f:
        config = json.load(f)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams
217
+
218
+
219
def get_hparams_from_file(config_path):
    """Load a JSON config file and return it wrapped in an HParams object."""
    with open(config_path, "r", encoding='utf-8') as f:
        config = json.load(f)

    return HParams(**config)
226
+
227
+
228
def check_git_hash(model_dir):
    """Warn if the repo's current git hash differs from the one saved with the model.

    On first call, saves the current hash to <model_dir>/githash; on later
    calls, compares against it. Does nothing useful outside a git checkout.
    """
    source_dir = os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(os.path.join(source_dir, ".git")):
        # BUGFIX: logger.warn is a deprecated alias — use warning().
        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
            source_dir
        ))
        return

    cur_hash = subprocess.getoutput("git rev-parse HEAD")

    path = os.path.join(model_dir, "githash")
    if os.path.exists(path):
        # BUGFIX: the original leaked file handles (open(...).read() with no
        # close); use context managers.
        with open(path) as f:
            saved_hash = f.read()
        if saved_hash != cur_hash:
            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
                saved_hash[:8], cur_hash[:8]))
    else:
        with open(path, "w") as f:
            f.write(cur_hash)
246
+
247
+
248
def get_logger(model_dir, filename="train.log"):
    """Create a DEBUG-level file logger for model_dir and register it as the
    module-global `logger` used by the other helpers in this file."""
    global logger
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)

    fmt = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    handler = logging.FileHandler(os.path.join(model_dir, filename))
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    return logger
261
+
262
+
263
class HParams():
    """Attribute-style wrapper around a (possibly nested) configuration dict.

    Nested plain dicts are recursively converted, so config values can be
    reached both as attributes (h.train.lr) and by key (h['train']['lr']).
    """

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            if type(value) is dict:
                value = HParams(**value)
            self[key] = value

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return repr(self.__dict__)
chinese_dialect_lexicons/changzhou.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "name": "Changzhou dialect to IPA",
3
- "segmentation": {
4
- "type": "mmseg",
5
- "dict": {
6
- "type": "ocd2",
7
- "file": "changzhou.ocd2"
8
- }
9
- },
10
- "conversion_chain": [
11
- {
12
- "dict": {
13
- "type": "group",
14
- "dicts": [
15
- {
16
- "type": "ocd2",
17
- "file": "changzhou.ocd2"
18
- }
19
- ]
20
- }
21
- }
22
- ]
23
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chinese_dialect_lexicons/changzhou.ocd2 DELETED
Binary file (96.1 kB)
 
chinese_dialect_lexicons/changzhou_3.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "name": "Changzhou dialect to IPA",
3
- "segmentation": {
4
- "type": "mmseg",
5
- "dict": {
6
- "type": "ocd2",
7
- "file": "changzhou.ocd2"
8
- }
9
- },
10
- "conversion_chain": [
11
- {
12
- "dict": {
13
- "type": "group",
14
- "dicts": [
15
- {
16
- "type": "ocd2",
17
- "file": "changzhou.ocd2"
18
- }
19
- ]
20
- }
21
- }
22
- ]
23
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chinese_dialect_lexicons/changzhou_3.ocd2 DELETED
Binary file (96.1 kB)
 
chinese_dialect_lexicons/cixi_2.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "name": "Cixi dialect to IPA",
3
- "segmentation": {
4
- "type": "mmseg",
5
- "dict": {
6
- "type": "ocd2",
7
- "file": "cixi.ocd2"
8
- }
9
- },
10
- "conversion_chain": [
11
- {
12
- "dict": {
13
- "type": "group",
14
- "dicts": [
15
- {
16
- "type": "ocd2",
17
- "file": "cixi.ocd2"
18
- }
19
- ]
20
- }
21
- }
22
- ]
23
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chinese_dialect_lexicons/cixi_2.ocd2 DELETED
Binary file (98 kB)
 
chinese_dialect_lexicons/fuyang_2.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "name": "Fuyang dialect to IPA",
3
- "segmentation": {
4
- "type": "mmseg",
5
- "dict": {
6
- "type": "ocd2",
7
- "file": "fuyang.ocd2"
8
- }
9
- },
10
- "conversion_chain": [
11
- {
12
- "dict": {
13
- "type": "group",
14
- "dicts": [
15
- {
16
- "type": "ocd2",
17
- "file": "fuyang.ocd2"
18
- }
19
- ]
20
- }
21
- }
22
- ]
23
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chinese_dialect_lexicons/fuyang_2.ocd2 DELETED
Binary file (83.7 kB)
 
chinese_dialect_lexicons/hangzhou_2.json DELETED
@@ -1,19 +0,0 @@
1
- {
2
- "name": "Hangzhounese to IPA",
3
- "segmentation": {
4
- "type": "mmseg",
5
- "dict": {
6
- "type": "ocd2",
7
- "file": "hangzhou.ocd2"
8
- }
9
- },
10
- "conversion_chain": [{
11
- "dict": {
12
- "type": "group",
13
- "dicts": [{
14
- "type": "ocd2",
15
- "file": "hangzhou.ocd2"
16
- }]
17
- }
18
- }]
19
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chinese_dialect_lexicons/hangzhou_2.ocd2 DELETED
Binary file (427 kB)