Alpaca233 Artrajz commited on
Commit
a53e7d9
0 Parent(s):

Duplicate from Artrajz/vits-simple-api

Browse files

Co-authored-by: Artrajz <Artrajz@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +37 -0
  2. Dockerfile +37 -0
  3. LICENSE +21 -0
  4. LICENSE-MoeGoe +21 -0
  5. Model/Bishojo_Mangekyo/config_mangekyo.json +36 -0
  6. Model/Bishojo_Mangekyo/generator_mangekyo.pth +3 -0
  7. Model/Cantonese/config.json +35 -0
  8. Model/Cantonese/model.pth +3 -0
  9. Model/Nene_Nanami_Rong_Tang/1374_epochs.pth +3 -0
  10. Model/Nene_Nanami_Rong_Tang/config.json +35 -0
  11. Model/genshin/G_953000.pth +3 -0
  12. Model/genshin/config.json +55 -0
  13. Model/hubert-soft-0d54a1f4.pt +3 -0
  14. Model/louise/360_epochs.pth +3 -0
  15. Model/louise/config.json +32 -0
  16. Model/model.onnx +3 -0
  17. Model/model.yaml +8 -0
  18. Model/npy/all_emotions.npy +3 -0
  19. Model/paimon/paimon6k.json +55 -0
  20. Model/paimon/paimon6k_390000.pth +3 -0
  21. Model/shanghainese/2796_epochs.pth +3 -0
  22. Model/shanghainese/config.json +35 -0
  23. Model/vctk/pretrained_vctk.pth +3 -0
  24. Model/vctk/vctk_base.json +55 -0
  25. Model/vits_chinese/bert_vits.json +55 -0
  26. Model/vits_chinese/vits_bert_model.pth +3 -0
  27. Model/w2v2-vits/1026_epochs.pth +3 -0
  28. Model/w2v2-vits/config.json +36 -0
  29. README.md +9 -0
  30. README_zh.md +626 -0
  31. app.py +474 -0
  32. attentions.py +300 -0
  33. bert/ProsodyModel.py +75 -0
  34. bert/__init__.py +2 -0
  35. bert/config.json +19 -0
  36. bert/prosody_model.pt +3 -0
  37. bert/prosody_tool.py +426 -0
  38. bert/vocab.txt +0 -0
  39. chinese_dialect_lexicons/changzhou.json +23 -0
  40. chinese_dialect_lexicons/changzhou.ocd2 +0 -0
  41. chinese_dialect_lexicons/changzhou_3.json +23 -0
  42. chinese_dialect_lexicons/changzhou_3.ocd2 +0 -0
  43. chinese_dialect_lexicons/cixi_2.json +23 -0
  44. chinese_dialect_lexicons/cixi_2.ocd2 +0 -0
  45. chinese_dialect_lexicons/fuyang_2.json +23 -0
  46. chinese_dialect_lexicons/fuyang_2.ocd2 +0 -0
  47. chinese_dialect_lexicons/hangzhou_2.json +19 -0
  48. chinese_dialect_lexicons/hangzhou_2.ocd2 +0 -0
  49. chinese_dialect_lexicons/jiading_2.json +23 -0
  50. chinese_dialect_lexicons/jiading_2.ocd2 +0 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ chinese_dialect_lexicons/jyutjyu_2.ocd2 filter=lfs diff=lfs merge=lfs -text
36
+ chinese_dialect_lexicons/zaonhe_2.ocd2 filter=lfs diff=lfs merge=lfs -text
37
+ chinese_dialect_lexicons/zaonhe.ocd2 filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10.11-slim-bullseye
2
+
3
+ RUN mkdir -p /app
4
+ WORKDIR /app
5
+
6
+ ENV DEBIAN_FRONTEND=noninteractive
7
+
8
+ RUN apt-get update && \
9
+ apt install build-essential -yq && \
10
+ apt install espeak-ng -yq && \
11
+ apt install cmake -yq && \
12
+ apt install -y wget -yq && \
13
+ apt-get clean && \
14
+ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
15
+ rm -rf /var/lib/apt/lists/*
16
+
17
+ RUN pip install MarkupSafe==2.1.2 numpy==1.23.3 cython six==1.16.0
18
+
19
+ RUN wget https://raw.githubusercontent.com/Artrajz/archived/main/openjtalk/openjtalk-0.3.0.dev2.tar.gz && \
20
+ tar -zxvf openjtalk-0.3.0.dev2.tar.gz && \
21
+ cd openjtalk-0.3.0.dev2 && \
22
+ rm -rf ./pyopenjtalk/open_jtalk_dic_utf_8-1.11 && \
23
+ python setup.py install && \
24
+ cd ../ && \
25
+ rm -f openjtalk-0.3.0.dev2.tar.gz && \
26
+ rm -rf openjtalk-0.3.0.dev2
27
+
28
+ RUN pip install torch --index-url https://download.pytorch.org/whl/cpu
29
+
30
+ COPY requirements.txt /app
31
+ RUN pip install -r requirements.txt
32
+
33
+ COPY . /app
34
+
35
+ EXPOSE 23456
36
+
37
+ CMD ["python", "/app/app.py"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Artrajz
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
LICENSE-MoeGoe ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2022 CjangCjengh
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
Model/Bishojo_Mangekyo/config_mangekyo.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "segment_size": 8192
4
+ },
5
+ "data": {
6
+ "text_cleaners":["japanese_cleaners"],
7
+ "max_wav_value": 32768.0,
8
+ "sampling_rate": 22050,
9
+ "filter_length": 1024,
10
+ "hop_length": 256,
11
+ "win_length": 1024,
12
+ "add_blank": true,
13
+ "n_speakers": 6
14
+ },
15
+ "model": {
16
+ "inter_channels": 192,
17
+ "hidden_channels": 192,
18
+ "filter_channels": 768,
19
+ "n_heads": 2,
20
+ "n_layers": 6,
21
+ "kernel_size": 3,
22
+ "p_dropout": 0.1,
23
+ "resblock": "1",
24
+ "resblock_kernel_sizes": [3,7,11],
25
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
26
+ "upsample_rates": [8,8,2,2],
27
+ "upsample_initial_channel": 512,
28
+ "upsample_kernel_sizes": [16,16,4,4],
29
+ "n_layers_q": 3,
30
+ "use_spectral_norm": false,
31
+ "gin_channels": 256
32
+ },
33
+ "speakers": ["\u84ee\u83ef", "\u7bdd\u30ce\u9727\u679d", "\u6ca2\u6e21\u96eb", "\u4e9c\u7483\u5b50", "\u706f\u9732\u690e", "\u89a1\u5915\u8389"],
34
+ "symbols": ["_", ",", ".", "!", "?", "-", "~","A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u2193", "\u2191", " "]
35
+ }
36
+
Model/Bishojo_Mangekyo/generator_mangekyo.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0acae1546bdda967fff35eba4e001c9d4d854d57c83678eb1575160a91a9b6fd
3
+ size 158893717
Model/Cantonese/config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "segment_size": 8192
4
+ },
5
+ "data": {
6
+ "text_cleaners":["chinese_dialect_cleaners"],
7
+ "max_wav_value": 32768.0,
8
+ "sampling_rate": 22050,
9
+ "filter_length": 1024,
10
+ "hop_length": 256,
11
+ "win_length": 1024,
12
+ "add_blank": true,
13
+ "n_speakers": 50
14
+ },
15
+ "model": {
16
+ "inter_channels": 192,
17
+ "hidden_channels": 192,
18
+ "filter_channels": 768,
19
+ "n_heads": 2,
20
+ "n_layers": 6,
21
+ "kernel_size": 3,
22
+ "p_dropout": 0.1,
23
+ "resblock": "1",
24
+ "resblock_kernel_sizes": [3,7,11],
25
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
26
+ "upsample_rates": [8,8,2,2],
27
+ "upsample_initial_channel": 512,
28
+ "upsample_kernel_sizes": [16,16,4,4],
29
+ "n_layers_q": 3,
30
+ "use_spectral_norm": false,
31
+ "gin_channels": 256
32
+ },
33
+ "speakers": ["\u5e03\u826f\u6893(cantonese)", "\u7dbe\u5730\u5be7\u3005(cantonese)", "\u671d\u6b66\u82b3\u4e43(cantonese)", "\u5728\u539f\u4e03\u6d77(cantonese)", "\u30e6\u30fc\u30b9\u30c6\u30a3\u30a2(cantonese)", "\u30b3\u30ec\u30c3\u30c8(cantonese)", "\u30ea\u30b7\u30a2(cantonese)", "\u30ab\u30a4\u30e0(cantonese)", "\u30eb\u30a4\u30ba(cantonese)", "\u3064\u304f\u3088\u307f\u3061\u3083\u3093(cantonese)", "\u83f2\u5442\u83c8(cantonese)", "\u8b1d\u5b50\u81e3(cantonese)", "\u96ea\u898b(cantonese)", "\u590f\u828a\u5e06(cantonese)", "\u7f85\u5c11\u5cf0(cantonese)", "\u8b1d\u5b50\u7487(cantonese)", "\u6960\u5e0c\u59d0(cantonese)", "\u8389\u8389(cantonese)", "\u5c0f\u8338(cantonese)", "\u5510\u4e50\u541f(cantonese)", "\u5c0f\u6bb7(cantonese)", "\u82b1\u73b2(cantonese)", "\u6d77\u8bcd\u4e0a\u6d77\u8bdd(cantonese)", "\u6d77\u8bcd\u5e7f\u4e1c\u8bdd(cantonese)"],
34
+ "symbols": ["_", ",", ".", "!", "?", "~", "\u2026", "\u2500", "#", "N", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "x", "y", "z", "\u00e6", "\u00e7", "\u00f8", "\u014b", "\u0153", "\u0235", "\u0250", "\u0251", "\u0252", "\u0253", "\u0254", "\u0255", "\u0257", "\u0258", "\u0259", "\u025a", "\u025b", "\u025c", "\u0263", "\u0264", "\u0266", "\u026a", "\u026d", "\u026f", "\u0275", "\u0277", "\u0278", "\u027b", "\u027e", "\u027f", "\u0282", "\u0285", "\u028a", "\u028b", "\u028c", "\u028f", "\u0291", "\u0294", "\u02a6", "\u02ae", "\u02b0", "\u02b7", "\u02c0", "\u02d0", "\u02e5", "\u02e6", "\u02e7", "\u02e8", "\u02e9", "\u0303", "\u031a", "\u0325", "\u0329", "\u1d00", "\u1d07", "\u2191", "\u2193", "\u2205", "\u2c7c", " "]
35
+ }
Model/Cantonese/model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bafc0ad64442808ccbdc1c880846d4d7ed30e5db6b9c68982bade0070e135a9
3
+ size 158966349
Model/Nene_Nanami_Rong_Tang/1374_epochs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edfb6b428c398fab83a85b5ae41e13cb5a9f7be12692129e8a880d4553701f7b
3
+ size 158888013
Model/Nene_Nanami_Rong_Tang/config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "segment_size": 8192
4
+ },
5
+ "data": {
6
+ "text_cleaners":["zh_ja_mixture_cleaners"],
7
+ "max_wav_value": 32768.0,
8
+ "sampling_rate": 22050,
9
+ "filter_length": 1024,
10
+ "hop_length": 256,
11
+ "win_length": 1024,
12
+ "add_blank": true,
13
+ "n_speakers": 5
14
+ },
15
+ "model": {
16
+ "inter_channels": 192,
17
+ "hidden_channels": 192,
18
+ "filter_channels": 768,
19
+ "n_heads": 2,
20
+ "n_layers": 6,
21
+ "kernel_size": 3,
22
+ "p_dropout": 0.1,
23
+ "resblock": "1",
24
+ "resblock_kernel_sizes": [3,7,11],
25
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
26
+ "upsample_rates": [8,8,2,2],
27
+ "upsample_initial_channel": 512,
28
+ "upsample_kernel_sizes": [16,16,4,4],
29
+ "n_layers_q": 3,
30
+ "use_spectral_norm": false,
31
+ "gin_channels": 256
32
+ },
33
+ "speakers": ["\u7dbe\u5730\u5be7\u3005", "\u5728\u539f\u4e03\u6d77", "\u5c0f\u8338", "\u5510\u4e50\u541f"],
34
+ "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "]
35
+ }
Model/genshin/G_953000.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03017f9a30580eb9103bc892a98299ed702f114d821146aa4b550e5ca724923e
3
+ size 159716737
Model/genshin/config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 1000,
5
+ "seed": 1234,
6
+ "epochs": 10000,
7
+ "learning_rate": 2e-4,
8
+ "betas": [0.8, 0.99],
9
+ "eps": 1e-9,
10
+ "batch_size": 64,
11
+ "fp16_run": true,
12
+ "lr_decay": 0.999875,
13
+ "segment_size": 8192,
14
+ "init_lr_ratio": 1,
15
+ "warmup_epochs": 0,
16
+ "c_mel": 45,
17
+ "c_kl": 1.0
18
+ },
19
+ "data": {
20
+ "training_files":"filelists/uma_genshin_genshinjp_bh3_train.txt.cleaned",
21
+ "validation_files":"filelists/uma_genshin_genshinjp_bh3_val.txt.cleaned",
22
+ "text_cleaners":["zh_ja_mixture_cleaners"],
23
+ "max_wav_value": 32768.0,
24
+ "sampling_rate": 22050,
25
+ "filter_length": 1024,
26
+ "hop_length": 256,
27
+ "win_length": 1024,
28
+ "n_mel_channels": 80,
29
+ "mel_fmin": 0.0,
30
+ "mel_fmax": null,
31
+ "add_blank": true,
32
+ "n_speakers": 804,
33
+ "cleaned_text": true
34
+ },
35
+ "model": {
36
+ "inter_channels": 192,
37
+ "hidden_channels": 192,
38
+ "filter_channels": 768,
39
+ "n_heads": 2,
40
+ "n_layers": 6,
41
+ "kernel_size": 3,
42
+ "p_dropout": 0.1,
43
+ "resblock": "1",
44
+ "resblock_kernel_sizes": [3,7,11],
45
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
46
+ "upsample_rates": [8,8,2,2],
47
+ "upsample_initial_channel": 512,
48
+ "upsample_kernel_sizes": [16,16,4,4],
49
+ "n_layers_q": 3,
50
+ "use_spectral_norm": false,
51
+ "gin_channels": 256
52
+ },
53
+ "speakers": ["\u7279\u522b\u5468", "\u65e0\u58f0\u94c3\u9e7f", "\u4e1c\u6d77\u5e1d\u7687\uff08\u5e1d\u5b9d\uff0c\u5e1d\u738b\uff09", "\u4e38\u5584\u65af\u57fa", "\u5bcc\u58eb\u5947\u8ff9", "\u5c0f\u6817\u5e3d", "\u9ec4\u91d1\u8239", "\u4f0f\u7279\u52a0", "\u5927\u548c\u8d64\u9aa5", "\u5927\u6811\u5feb\u8f66", "\u8349\u4e0a\u98de", "\u83f1\u4e9a\u9a6c\u900a", "\u76ee\u767d\u9ea6\u6606", "\u795e\u9e70", "\u597d\u6b4c\u5267", "\u6210\u7530\u767d\u4ec1", "\u9c81\u9053\u592b\u8c61\u5f81\uff08\u7687\u5e1d\uff09", "\u6c14\u69fd", "\u7231\u4e3d\u6570\u7801", "\u661f\u4e91\u5929\u7a7a", "\u7389\u85fb\u5341\u5b57", "\u7f8e\u5999\u59ff\u52bf", "\u7435\u7436\u6668\u5149", "\u6469\u8036\u91cd\u70ae", "\u66fc\u57ce\u8336\u5ea7", "\u7f8e\u6d66\u6ce2\u65c1", "\u76ee\u767d\u8d56\u6069", "\u83f1\u66d9", "\u96ea\u4e2d\u7f8e\u4eba", "\u7c73\u6d74", "\u827e\u5c3c\u65af\u98ce\u795e", "\u7231\u4e3d\u901f\u5b50\uff08\u7231\u4e3d\u5feb\u5b50\uff09", "\u7231\u6155\u7ec7\u59ec", "\u7a3b\u8377\u4e00", "\u80dc\u5229\u5956\u5238", "\u7a7a\u4e2d\u795e\u5bab", "\u8363\u8fdb\u95ea\u8000", "\u771f\u673a\u4f36", "\u5ddd\u4e0a\u516c\u4e3b", "\u9ec4\u91d1\u57ce\uff08\u9ec4\u91d1\u57ce\u5e02\uff09", "\u6a31\u82b1\u8fdb\u738b", "\u91c7\u73e0", "\u65b0\u5149\u98ce", "\u4e1c\u5546\u53d8\u9769", "\u8d85\u7ea7\u5c0f\u6d77\u6e7e", "\u9192\u76ee\u98de\u9e70\uff08\u5bc4\u5bc4\u5b50\uff09", "\u8352\u6f20\u82f1\u96c4", "\u4e1c\u701b\u4f50\u6566", "\u4e2d\u5c71\u5e86\u5178", "\u6210\u7530\u5927\u8fdb", "\u897f\u91ce\u82b1", "\u6625\u4e3d\uff08\u4e4c\u62c9\u62c9\uff09", "\u9752\u7af9\u56de\u5fc6", "\u5fae\u5149\u98de\u9a79", "\u7f8e\u4e3d\u5468\u65e5", "\u5f85\u517c\u798f\u6765", "mr cb\uff08cb\u5148\u751f\uff09", "\u540d\u5c06\u6012\u6d9b\uff08\u540d\u5c06\u6237\u4ec1\uff09", "\u76ee\u767d\u591a\u4f2f", "\u4f18\u79c0\u7d20\u8d28", "\u5e1d\u738b\u5149\u8f89", "\u5f85\u517c\u8bd7\u6b4c\u5267", "\u751f\u91ce\u72c4\u675c\u65af", "\u76ee\u767d\u5584\u4fe1", "\u5927\u62d3\u592a\u9633\u795e", 
"\u53cc\u6da1\u8f6e\uff08\u4e24\u7acb\u76f4\uff0c\u4e24\u55b7\u5c04\uff0c\u4e8c\u9505\u5934\uff0c\u9006\u55b7\u5c04\uff09", "\u91cc\u89c1\u5149\u94bb\uff08\u8428\u6258\u8bfa\u91d1\u521a\u77f3\uff09", "\u5317\u90e8\u7384\u9a79", "\u6a31\u82b1\u5343\u4ee3\u738b", "\u5929\u72fc\u661f\u8c61\u5f81", "\u76ee\u767d\u963f\u5c14\u4e39", "\u516b\u91cd\u65e0\u654c", "\u9e64\u4e38\u521a\u5fd7", "\u76ee\u767d\u5149\u660e", "\u6210\u7530\u62dc\u4ec1\uff08\u6210\u7530\u8def\uff09", "\u4e5f\u6587\u6444\u8f89", "\u5c0f\u6797\u5386\u5947", "\u5317\u6e2f\u706b\u5c71", "\u5947\u9510\u9a8f", "\u82e6\u6da9\u7cd6\u971c", "\u5c0f\u5c0f\u8695\u8327", "\u9a8f\u5ddd\u624b\u7eb2\uff08\u7eff\u5e3d\u6076\u9b54\uff09", "\u79cb\u5ddd\u5f25\u751f\uff08\u5c0f\u5c0f\u7406\u4e8b\u957f\uff09", "\u4e59\u540d\u53f2\u60a6\u5b50\uff08\u4e59\u540d\u8bb0\u8005\uff09", "\u6850\u751f\u9662\u8475", "\u5b89\u5fc3\u6cfd\u523a\u523a\u7f8e", "\u6a2b\u672c\u7406\u5b50", "\u795e\u91cc\u7eeb\u534e\uff08\u9f9f\u9f9f\uff09", "\u7434", "\u7a7a\uff08\u7a7a\u54e5\uff09", "\u4e3d\u838e", "\u8367\uff08\u8367\u59b9\uff09", "\u82ad\u82ad\u62c9", "\u51ef\u4e9a", "\u8fea\u5362\u514b", "\u96f7\u6cfd", "\u5b89\u67cf", "\u6e29\u8fea", "\u9999\u83f1", "\u5317\u6597", "\u884c\u79cb", "\u9b48", "\u51dd\u5149", "\u53ef\u8389", "\u949f\u79bb", "\u83f2\u8c22\u5c14\uff08\u7687\u5973\uff09", "\u73ed\u5c3c\u7279", "\u8fbe\u8fbe\u5229\u4e9a\uff08\u516c\u5b50\uff09", "\u8bfa\u827e\u5c14\uff08\u5973\u4ec6\uff09", "\u4e03\u4e03", "\u91cd\u4e91", "\u7518\u96e8\uff08\u6930\u7f8a\uff09", "\u963f\u8d1d\u591a", "\u8fea\u5965\u5a1c\uff08\u732b\u732b\uff09", "\u83ab\u5a1c", "\u523b\u6674", "\u7802\u7cd6", "\u8f9b\u7131", "\u7f57\u838e\u8389\u4e9a", "\u80e1\u6843", "\u67ab\u539f\u4e07\u53f6\uff08\u4e07\u53f6\uff09", "\u70df\u7eef", "\u5bb5\u5bab", "\u6258\u9a6c", "\u4f18\u83c8", "\u96f7\u7535\u5c06\u519b\uff08\u96f7\u795e\uff09", "\u65e9\u67da", "\u73ca\u745a\u5bab\u5fc3\u6d77\uff08\u5fc3\u6d77\uff0c\u6263\u6263\u7c73\uff09", "\u4e94\u90ce", 
"\u4e5d\u6761\u88df\u7f57", "\u8352\u6cf7\u4e00\u6597\uff08\u4e00\u6597\uff09", "\u57c3\u6d1b\u4f0a", "\u7533\u9e64", "\u516b\u91cd\u795e\u5b50\uff08\u795e\u5b50\uff09", "\u795e\u91cc\u7eeb\u4eba\uff08\u7eeb\u4eba\uff09", "\u591c\u5170", "\u4e45\u5c90\u5fcd", "\u9e7f\u91ce\u82d1\u5e73\u85cf", "\u63d0\u7eb3\u91cc", "\u67ef\u83b1", "\u591a\u8389", "\u4e91\u5807", "\u7eb3\u897f\u59b2\uff08\u8349\u795e\uff09", "\u6df1\u6e0a\u4f7f\u5f92", "\u59ae\u9732", "\u8d5b\u8bfa", "\u503a\u52a1\u5904\u7406\u4eba", "\u574e\u8482\u4e1d", "\u771f\u5f13\u5feb\u8f66", "\u79cb\u4eba", "\u671b\u65cf", "\u827e\u5c14\u83f2", "\u827e\u8389\u4e1d", "\u827e\u4f26", "\u963f\u6d1b\u74e6", "\u5929\u91ce", "\u5929\u76ee\u5341\u4e94", "\u611a\u4eba\u4f17-\u5b89\u5fb7\u70c8", "\u5b89\u987a", "\u5b89\u897f", "\u8475", "\u9752\u6728", "\u8352\u5ddd\u5e78\u6b21", "\u8352\u8c37", "\u6709\u6cfd", "\u6d45\u5ddd", "\u9ebb\u7f8e", "\u51dd\u5149\u52a9\u624b", "\u963f\u6258", "\u7afa\u5b50", "\u767e\u8bc6", "\u767e\u95fb", "\u767e\u6653", "\u767d\u672f", "\u8d1d\u96c5\u7279\u4e3d\u5947", "\u4e3d\u5854", "\u5931\u843d\u8ff7\u8fed", "\u7f2d\u4e71\u661f\u68d8", "\u4f0a\u7538", "\u4f0f\u7279\u52a0\u5973\u5b69", "\u72c2\u70ed\u84dd\u8c03", "\u8389\u8389\u5a05", "\u841d\u838e\u8389\u5a05", "\u516b\u91cd\u6a31", "\u516b\u91cd\u971e", "\u5361\u83b2", "\u7b2c\u516d\u591c\u60f3\u66f2", "\u5361\u841d\u5c14", "\u59ec\u5b50", "\u6781\u5730\u6218\u5203", "\u5e03\u6d1b\u59ae\u5a05", "\u6b21\u751f\u94f6\u7ffc", "\u7406\u4e4b\u5f8b\u8005%26\u5e0c\u513f", "\u7406\u4e4b\u5f8b\u8005", "\u8ff7\u57ce\u9a87\u5154", "\u5e0c\u513f", "\u9b47\u591c\u661f\u6e0a", "\u9ed1\u5e0c\u513f", "\u5e15\u6735\u83f2\u8389\u4e1d", "\u4e0d\u706d\u661f\u951a", "\u5929\u5143\u9a91\u82f1", "\u5e7d\u5170\u9edb\u5c14", "\u6d3e\u8499bh3", "\u7231\u9171", "\u7eef\u7389\u4e38", "\u5fb7\u4e3d\u838e", "\u6708\u4e0b\u521d\u62e5", "\u6714\u591c\u89c2\u661f", "\u66ae\u5149\u9a91\u58eb", "\u683c\u857e\u4fee", "\u7559\u4e91\u501f\u98ce\u771f\u541b", 
"\u6885\u6bd4\u4e4c\u65af", "\u4eff\u72b9\u5927", "\u514b\u83b1\u56e0", "\u5723\u5251\u5e7d\u5170\u9edb\u5c14", "\u5996\u7cbe\u7231\u8389", "\u7279\u65af\u62c9zero", "\u82cd\u7384", "\u82e5\u6c34", "\u897f\u7433", "\u6234\u56e0\u65af\u96f7\u5e03", "\u8d1d\u62c9", "\u8d64\u9e22", "\u9547\u9b42\u6b4c", "\u6e21\u9e26", "\u4eba\u4e4b\u5f8b\u8005", "\u7231\u8389\u5e0c\u96c5", "\u5929\u7a79\u6e38\u4fa0", "\u742a\u4e9a\u5a1c", "\u7a7a\u4e4b\u5f8b\u8005", "\u85aa\u708e\u4e4b\u5f8b\u8005", "\u4e91\u58a8\u4e39\u5fc3", "\u7b26\u534e", "\u8bc6\u4e4b\u5f8b\u8005", "\u7279\u74e6\u6797", "\u7ef4\u5c14\u8587", "\u82bd\u8863", "\u96f7\u4e4b\u5f8b\u8005", "\u65ad\u7f6a\u5f71\u821e", "\u963f\u6ce2\u5c3c\u4e9a", "\u698e\u672c", "\u5384\u5c3c\u65af\u7279", "\u6076\u9f99", "\u8303\u4e8c\u7237", "\u6cd5\u62c9", "\u611a\u4eba\u4f17\u58eb\u5175", "\u611a\u4eba\u4f17\u58eb\u5175a", "\u611a\u4eba\u4f17\u58eb\u5175b", "\u611a\u4eba\u4f17\u58eb\u5175c", "\u611a\u4eba\u4f17a", "\u611a\u4eba\u4f17b", "\u98de\u98de", "\u83f2\u5229\u514b\u65af", "\u5973\u6027\u8ddf\u968f\u8005", "\u9022\u5ca9", "\u6446\u6e21\u4eba", "\u72c2\u8e81\u7684\u7537\u4eba", "\u5965\u5179", "\u8299\u841d\u62c9", "\u8ddf\u968f\u8005", "\u871c\u6c41\u751f\u7269", "\u9ec4\u9ebb\u5b50", "\u6e0a\u4e0a", "\u85e4\u6728", "\u6df1\u89c1", "\u798f\u672c", "\u8299\u84c9", "\u53e4\u6cfd", "\u53e4\u7530", "\u53e4\u5c71", "\u53e4\u8c37\u6607", "\u5085\u4e09\u513f", "\u9ad8\u8001\u516d", "\u77ff\u5de5\u5192", "\u5143\u592a", "\u5fb7\u5b89\u516c", "\u8302\u624d\u516c", "\u6770\u62c9\u5fb7", "\u845b\u7f57\u4e3d", "\u91d1\u5ffd\u5f8b", "\u516c\u4fca", "\u9505\u5df4", "\u6b4c\u5fb7", "\u963f\u8c6a", "\u72d7\u4e09\u513f", "\u845b\u745e\u4e1d", "\u82e5\u5fc3", "\u963f\u5c71\u5a46", "\u602a\u9e1f", "\u5e7f\u7af9", "\u89c2\u6d77", "\u5173\u5b8f", "\u871c\u6c41\u536b\u5175", "\u5b88\u536b1", "\u50b2\u6162\u7684\u5b88\u536b", "\u5bb3\u6015\u7684\u5b88\u536b", "\u8d35\u5b89", "\u76d6\u4f0a", "\u963f\u521b", "\u54c8\u592b\u4e39", 
"\u65e5\u8bed\u963f\u8d1d\u591a\uff08\u91ce\u5c9b\u5065\u513f\uff09", "\u65e5\u8bed\u57c3\u6d1b\u4f0a\uff08\u9ad8\u57a3\u5f69\u9633\uff09", "\u65e5\u8bed\u5b89\u67cf\uff08\u77f3\u89c1\u821e\u83dc\u9999\uff09", "\u65e5\u8bed\u795e\u91cc\u7eeb\u534e\uff08\u65e9\u89c1\u6c99\u7ec7\uff09", "\u65e5\u8bed\u795e\u91cc\u7eeb\u4eba\uff08\u77f3\u7530\u5f70\uff09", "\u65e5\u8bed\u767d\u672f\uff08\u6e38\u4f50\u6d69\u4e8c\uff09", "\u65e5\u8bed\u82ad\u82ad\u62c9\uff08\u9b3c\u5934\u660e\u91cc\uff09", "\u65e5\u8bed\u5317\u6597\uff08\u5c0f\u6e05\u6c34\u4e9a\u7f8e\uff09", "\u65e5\u8bed\u73ed\u5c3c\u7279\uff08\u9022\u5742\u826f\u592a\uff09", "\u65e5\u8bed\u574e\u8482\u4e1d\uff08\u67da\u6728\u51c9\u9999\uff09", "\u65e5\u8bed\u91cd\u4e91\uff08\u9f50\u85e4\u58ee\u9a6c\uff09", "\u65e5\u8bed\u67ef\u83b1\uff08\u524d\u5ddd\u51c9\u5b50\uff09", "\u65e5\u8bed\u8d5b\u8bfa\uff08\u5165\u91ce\u81ea\u7531\uff09", "\u65e5\u8bed\u6234\u56e0\u65af\u96f7\u5e03\uff08\u6d25\u7530\u5065\u6b21\u90ce\uff09", "\u65e5\u8bed\u8fea\u5362\u514b\uff08\u5c0f\u91ce\u8d24\u7ae0\uff09", "\u65e5\u8bed\u8fea\u5965\u5a1c\uff08\u4e95\u6cfd\u8bd7\u7ec7\uff09", "\u65e5\u8bed\u591a\u8389\uff08\u91d1\u7530\u670b\u5b50\uff09", "\u65e5\u8bed\u4f18\u83c8\uff08\u4f50\u85e4\u5229\u5948\uff09", "\u65e5\u8bed\u83f2\u8c22\u5c14\uff08\u5185\u7530\u771f\u793c\uff09", "\u65e5\u8bed\u7518\u96e8\uff08\u4e0a\u7530\u4e3d\u5948\uff09", "\u65e5\u8bed\uff08\u7560\u4e2d\u7950\uff09", "\u65e5\u8bed\u9e7f\u91ce\u9662\u5e73\u85cf\uff08\u4e95\u53e3\u7950\u4e00\uff09", "\u65e5\u8bed\u7a7a\uff08\u5800\u6c5f\u77ac\uff09", "\u65e5\u8bed\u8367\uff08\u60a0\u6728\u78a7\uff09", "\u65e5\u8bed\u80e1\u6843\uff08\u9ad8\u6865\u674e\u4f9d\uff09", "\u65e5\u8bed\u4e00\u6597\uff08\u897f\u5ddd\u8d35\u6559\uff09", "\u65e5\u8bed\u51ef\u4e9a\uff08\u9e1f\u6d77\u6d69\u8f85\uff09", "\u65e5\u8bed\u4e07\u53f6\uff08\u5c9b\u5d0e\u4fe1\u957f\uff09", "\u65e5\u8bed\u523b\u6674\uff08\u559c\u591a\u6751\u82f1\u68a8\uff09", 
"\u65e5\u8bed\u53ef\u8389\uff08\u4e45\u91ce\u7f8e\u54b2\uff09", "\u65e5\u8bed\u5fc3\u6d77\uff08\u4e09\u68ee\u94c3\u5b50\uff09", "\u65e5\u8bed\u4e5d\u6761\u88df\u7f57\uff08\u6fd1\u6237\u9ebb\u6c99\u7f8e\uff09", "\u65e5\u8bed\u4e3d\u838e\uff08\u7530\u4e2d\u7406\u60e0\uff09", "\u65e5\u8bed\u83ab\u5a1c\uff08\u5c0f\u539f\u597d\u7f8e\uff09", "\u65e5\u8bed\u7eb3\u897f\u59b2\uff08\u7530\u6751\u7531\u52a0\u8389\uff09", "\u65e5\u8bed\u59ae\u9732\uff08\u91d1\u5143\u5bff\u5b50\uff09", "\u65e5\u8bed\u51dd\u5149\uff08\u5927\u539f\u6c99\u8036\u9999\uff09", "\u65e5\u8bed\u8bfa\u827e\u5c14\uff08\u9ad8\u5c3e\u594f\u97f3\uff09", "\u65e5\u8bed\u5965\u5179\uff08\u589e\u8c37\u5eb7\u7eaa\uff09", "\u65e5\u8bed\u6d3e\u8499\uff08\u53e4\u8d3a\u8475\uff09", "\u65e5\u8bed\u7434\uff08\u658b\u85e4\u5343\u548c\uff09", "\u65e5\u8bed\u4e03\u4e03\uff08\u7530\u6751\u7531\u52a0\u8389\uff09", "\u65e5\u8bed\u96f7\u7535\u5c06\u519b\uff08\u6cfd\u57ce\u7f8e\u96ea\uff09", "\u65e5\u8bed\u96f7\u6cfd\uff08\u5185\u5c71\u6602\u8f89\uff09", "\u65e5\u8bed\u7f57\u838e\u8389\u4e9a\uff08\u52a0\u9688\u4e9a\u8863\uff09", "\u65e5\u8bed\u65e9\u67da\uff08\u6d32\u5d0e\u7eeb\uff09", "\u65e5\u8bed\u6563\u5175\uff08\u67ff\u539f\u5f7b\u4e5f\uff09", "\u65e5\u8bed\u7533\u9e64\uff08\u5ddd\u6f84\u7eeb\u5b50\uff09", "\u65e5\u8bed\u4e45\u5c90\u5fcd\uff08\u6c34\u6865\u9999\u7ec7\uff09", "\u65e5\u8bed\u5973\u58eb\uff08\u5e84\u5b50\u88d5\u8863\uff09", "\u65e5\u8bed\u7802\u7cd6\uff08\u85e4\u7530\u831c\uff09", "\u65e5\u8bed\u8fbe\u8fbe\u5229\u4e9a\uff08\u6728\u6751\u826f\u5e73\uff09", "\u65e5\u8bed\u6258\u9a6c\uff08\u68ee\u7530\u6210\u4e00\uff09", "\u65e5\u8bed\u63d0\u7eb3\u91cc\uff08\u5c0f\u6797\u6c99\u82d7\uff09", "\u65e5\u8bed\u6e29\u8fea\uff08\u6751\u6fd1\u6b65\uff09", "\u65e5\u8bed\u9999\u83f1\uff08\u5c0f\u6cfd\u4e9a\u674e\uff09", "\u65e5\u8bed\u9b48\uff08\u677e\u5188\u796f\u4e1e\uff09", "\u65e5\u8bed\u884c\u79cb\uff08\u7686\u5ddd\u7eaf\u5b50\uff09", "\u65e5\u8bed\u8f9b\u7131\uff08\u9ad8\u6865\u667a\u79cb\uff09", 
"\u65e5\u8bed\u516b\u91cd\u795e\u5b50\uff08\u4f50\u4ed3\u7eeb\u97f3\uff09", "\u65e5\u8bed\u70df\u7eef\uff08\u82b1\u5b88\u7531\u7f8e\u91cc\uff09", "\u65e5\u8bed\u591c\u5170\uff08\u8fdc\u85e4\u7eeb\uff09", "\u65e5\u8bed\u5bb5\u5bab\uff08\u690d\u7530\u4f73\u5948\uff09", "\u65e5\u8bed\u4e91\u5807\uff08\u5c0f\u5ca9\u4e95\u5c0f\u9e1f\uff09", "\u65e5\u8bed\u949f\u79bb\uff08\u524d\u91ce\u667a\u662d\uff09", "\u6770\u514b", "\u963f\u5409", "\u6c5f\u821f", "\u9274\u79cb", "\u5609\u4e49", "\u7eaa\u82b3", "\u666f\u6f84", "\u7ecf\u7eb6", "\u666f\u660e", "\u664b\u4f18", "\u963f\u9e20", "\u9152\u5ba2", "\u4e54\u5c14", "\u4e54\u745f\u592b", "\u7ea6\u987f", "\u4e54\u4f0a\u65af", "\u5c45\u5b89", "\u541b\u541b", "\u987a\u5409", "\u7eaf\u4e5f", "\u91cd\u4f50", "\u5927\u5c9b\u7eaf\u5e73", "\u84b2\u6cfd", "\u52d8\u89e3\u7531\u5c0f\u8def\u5065\u4e09\u90ce", "\u67ab", "\u67ab\u539f\u4e49\u5e86", "\u836b\u5c71", "\u7532\u6590\u7530\u9f8d\u99ac", "\u6d77\u6597", "\u60df\u795e\u6674\u4e4b\u4ecb", "\u9e7f\u91ce\u5948\u5948", "\u5361\u7435\u8389\u4e9a", "\u51ef\u745f\u7433", "\u52a0\u85e4\u4fe1\u609f", "\u52a0\u85e4\u6d0b\u5e73", "\u80dc\u5bb6", "\u8305\u847a\u4e00\u5e86", "\u548c\u662d", "\u4e00\u6b63", "\u4e00\u9053", "\u6842\u4e00", "\u5e86\u6b21\u90ce", "\u963f\u8d24", "\u5065\u53f8", "\u5065\u6b21\u90ce", "\u5065\u4e09\u90ce", "\u5929\u7406", "\u6740\u624ba", "\u6740\u624bb", "\u6728\u5357\u674f\u5948", "\u6728\u6751", "\u56fd\u738b", "\u6728\u4e0b", "\u5317\u6751", "\u6e05\u60e0", "\u6e05\u4eba", "\u514b\u5217\u95e8\u7279", "\u9a91\u58eb", "\u5c0f\u6797", "\u5c0f\u6625", "\u5eb7\u62c9\u5fb7", "\u5927\u8089\u4e38", "\u7434\u7f8e", "\u5b8f\u4e00", "\u5eb7\u4ecb", "\u5e78\u5fb7", "\u9ad8\u5584", "\u68a2", "\u514b\u7f57\u7d22", "\u4e45\u4fdd", "\u4e5d\u6761\u9570\u6cbb", "\u4e45\u6728\u7530", "\u6606\u94a7", "\u83ca\u5730\u541b", "\u4e45\u5229\u987b", "\u9ed1\u7530", "\u9ed1\u6cfd\u4eac\u4e4b\u4ecb", "\u54cd\u592a", "\u5c9a\u59d0", "\u5170\u6eaa", "\u6f9c\u9633", "\u52b3\u4f26\u65af", 
"\u4e50\u660e", "\u83b1\u8bfa", "\u83b2", "\u826f\u5b50", "\u674e\u5f53", "\u674e\u4e01", "\u5c0f\u4e50", "\u7075", "\u5c0f\u73b2", "\u7433\u7405a", "\u7433\u7405b", "\u5c0f\u5f6c", "\u5c0f\u5fb7", "\u5c0f\u697d", "\u5c0f\u9f99", "\u5c0f\u5434", "\u5c0f\u5434\u7684\u8bb0\u5fc6", "\u7406\u6b63", "\u963f\u9f99", "\u5362\u5361", "\u6d1b\u6210", "\u7f57\u5de7", "\u5317\u98ce\u72fc", "\u5362\u6b63", "\u840d\u59e5\u59e5", "\u524d\u7530", "\u771f\u663c", "\u9ebb\u7eaa", "\u771f", "\u611a\u4eba\u4f17-\u9a6c\u514b\u897f\u59c6", "\u5973\u6027a", "\u5973\u6027b", "\u5973\u6027a\u7684\u8ddf\u968f\u8005", "\u963f\u5b88", "\u739b\u683c\u4e3d\u7279", "\u771f\u7406", "\u739b\u4e54\u4e3d", "\u739b\u6587", "\u6b63\u80dc", "\u660c\u4fe1", "\u5c06\u53f8", "\u6b63\u4eba", "\u8def\u7237", "\u8001\u7ae0", "\u677e\u7530", "\u677e\u672c", "\u677e\u6d66", "\u677e\u5742", "\u8001\u5b5f", "\u5b5f\u4e39", "\u5546\u4eba\u968f\u4ece", "\u4f20\u4ee4\u5175", "\u7c73\u6b47\u5c14", "\u5fa1\u8206\u6e90\u4e00\u90ce", "\u5fa1\u8206\u6e90\u6b21\u90ce", "\u5343\u5ca9\u519b\u6559\u5934", "\u5343\u5ca9\u519b\u58eb\u5175", "\u660e\u535a", "\u660e\u4fca", "\u7f8e\u94c3", "\u7f8e\u548c", "\u963f\u5e78", "\u524a\u6708\u7b51\u9633\u771f\u541b", "\u94b1\u773c\u513f", "\u68ee\u5f66", "\u5143\u52a9", "\u7406\u6c34\u53e0\u5c71\u771f\u541b", "\u7406\u6c34\u758a\u5c71\u771f\u541b", "\u6731\u8001\u677f", "\u6728\u6728", "\u6751\u4e0a", "\u6751\u7530", "\u6c38\u91ce", "\u957f\u91ce\u539f\u9f99\u4e4b\u4ecb", "\u957f\u6fd1", "\u4e2d\u91ce\u5fd7\u4e43", "\u83dc\u83dc\u5b50", "\u6960\u6960", "\u6210\u6fd1", "\u963f\u5185", "\u5b81\u7984", "\u725b\u5fd7", "\u4fe1\u535a", "\u4f38\u592b", "\u91ce\u65b9", "\u8bfa\u62c9", "\u7eaa\u9999", "\u8bfa\u66fc", "\u4fee\u5973", "\u7eaf\u6c34\u7cbe\u7075", "\u5c0f\u5ddd", "\u5c0f\u4ed3\u6faa", "\u5188\u6797", "\u5188\u5d0e\u7ed8\u91cc\u9999", "\u5188\u5d0e\u9646\u6597", "\u5965\u62c9\u592b", "\u8001\u79d1", "\u9b3c\u5a46\u5a46", "\u5c0f\u91ce\u5bfa", 
"\u5927\u6cb3\u539f\u4e94\u53f3\u536b\u95e8", "\u5927\u4e45\u4fdd\u5927\u4ecb", "\u5927\u68ee", "\u5927\u52a9", "\u5965\u7279", "\u6d3e\u8499", "\u6d3e\u84992", "\u75c5\u4ebaa", "\u75c5\u4ebab", "\u5df4\u987f", "\u6d3e\u6069", "\u670b\u4e49", "\u56f4\u89c2\u7fa4\u4f17", "\u56f4\u89c2\u7fa4\u4f17a", "\u56f4\u89c2\u7fa4\u4f17b", "\u56f4\u89c2\u7fa4\u4f17c", "\u56f4\u89c2\u7fa4\u4f17d", "\u56f4\u89c2\u7fa4\u4f17e", "\u94dc\u96c0", "\u963f\u80a5", "\u5174\u53d4", "\u8001\u5468\u53d4", "\u516c\u4e3b", "\u5f7c\u5f97", "\u4e7e\u5b50", "\u828a\u828a", "\u4e7e\u73ae", "\u7eee\u547d", "\u675e\u5e73", "\u79cb\u6708", "\u6606\u6069", "\u96f7\u7535\u5f71", "\u5170\u9053\u5c14", "\u96f7\u8499\u5fb7", "\u5192\u5931\u7684\u5e15\u62c9\u5fb7", "\u4f36\u4e00", "\u73b2\u82b1", "\u963f\u4ec1", "\u5bb6\u81e3\u4eec", "\u68a8\u7ed8", "\u8363\u6c5f", "\u620e\u4e16", "\u6d6a\u4eba", "\u7f57\u4f0a\u65af", "\u5982\u610f", "\u51c9\u5b50", "\u5f69\u9999", "\u9152\u4e95", "\u5742\u672c", "\u6714\u6b21\u90ce", "\u6b66\u58eba", "\u6b66\u58ebb", "\u6b66\u58ebc", "\u6b66\u58ebd", "\u73ca\u745a", "\u4e09\u7530", "\u838e\u62c9", "\u7b39\u91ce", "\u806a\u7f8e", "\u806a", "\u5c0f\u767e\u5408", "\u6563\u5175", "\u5bb3\u6015\u7684\u5c0f\u5218", "\u8212\u4f2f\u7279", "\u8212\u8328", "\u6d77\u9f99", "\u4e16\u5b50", "\u8c22\u5c14\u76d6", "\u5bb6\u4e01", "\u5546\u534e", "\u6c99\u5bc5", "\u963f\u5347", "\u67f4\u7530", "\u963f\u8302", "\u5f0f\u5927\u5c06", "\u6e05\u6c34", "\u5fd7\u6751\u52d8\u5175\u536b", "\u65b0\u4e4b\u4e1e", "\u5fd7\u7ec7", "\u77f3\u5934", "\u8bd7\u7fbd", "\u8bd7\u7b60", "\u77f3\u58ee", "\u7fd4\u592a", "\u6b63\u4e8c", "\u5468\u5e73", "\u8212\u6768", "\u9f50\u683c\u8299\u4e3d\u96c5", "\u5973\u58eb", "\u601d\u52e4", "\u516d\u6307\u4e54\u745f", "\u611a\u4eba\u4f17\u5c0f\u5175d", "\u611a\u4eba\u4f17\u5c0f\u5175a", "\u611a\u4eba\u4f17\u5c0f\u5175b", "\u611a\u4eba\u4f17\u5c0f\u5175c", "\u5434\u8001\u4e94", "\u5434\u8001\u4e8c", "\u6ed1\u5934\u9b3c", "\u8a00\u7b11", "\u5434\u8001\u4e03", 
"\u58eb\u5175h", "\u58eb\u5175i", "\u58eb\u5175a", "\u58eb\u5175b", "\u58eb\u5175c", "\u58eb\u5175d", "\u58eb\u5175e", "\u58eb\u5175f", "\u58eb\u5175g", "\u594f\u592a", "\u65af\u5766\u5229", "\u6387\u661f\u652b\u8fb0\u5929\u541b", "\u5c0f\u5934", "\u5927\u6b66", "\u9676\u4e49\u9686", "\u6749\u672c", "\u82cf\u897f", "\u5acc\u7591\u4ebaa", "\u5acc\u7591\u4ebab", "\u5acc\u7591\u4ebac", "\u5acc\u7591\u4ebad", "\u65af\u4e07", "\u5251\u5ba2a", "\u5251\u5ba2b", "\u963f\u4e8c", "\u5fe0\u80dc", "\u5fe0\u592b", "\u963f\u656c", "\u5b5d\u5229", "\u9e70\u53f8\u8fdb", "\u9ad8\u5c71", "\u4e5d\u6761\u5b5d\u884c", "\u6bc5", "\u7af9\u5185", "\u62d3\u771f", "\u5353\u4e5f", "\u592a\u90ce\u4e38", "\u6cf0\u52d2", "\u624b\u5c9b", "\u54f2\u5e73", "\u54f2\u592b", "\u6258\u514b", "\u5927boss", "\u963f\u5f3a", "\u6258\u5c14\u5fb7\u62c9", "\u65c1\u89c2\u8005", "\u5929\u6210", "\u963f\u5927", "\u8482\u739b\u4e4c\u65af", "\u63d0\u7c73", "\u6237\u7530", "\u963f\u4e09", "\u4e00\u8d77\u7684\u4eba", "\u5fb7\u7530", "\u5fb7\u957f", "\u667a\u6811", "\u5229\u5f66", "\u80d6\u4e4e\u4e4e\u7684\u65c5\u884c\u8005", "\u85cf\u5b9d\u4ebaa", "\u85cf\u5b9d\u4ebab", "\u85cf\u5b9d\u4ebac", "\u85cf\u5b9d\u4ebad", "\u963f\u7947", "\u6052\u96c4", "\u9732\u5b50", "\u8bdd\u5267\u56e2\u56e2\u957f", "\u5185\u6751", "\u4e0a\u91ce", "\u4e0a\u6749", "\u8001\u6234", "\u8001\u9ad8", "\u8001\u8d3e", "\u8001\u58a8", "\u8001\u5b59", "\u5929\u67a2\u661f", "\u8001\u4e91", "\u6709\u4e50\u658b", "\u4e11\u96c4", "\u4e4c\u7ef4", "\u74e6\u4eac", "\u83f2\u5c14\u6208\u9edb\u7279", "\u7ef4\u591a\u5229\u4e9a", "\u8587\u5c14", "\u74e6\u683c\u7eb3", "\u963f\u5916", "\u4f8d\u5973", "\u74e6\u62c9", "\u671b\u96c5", "\u5b9b\u70df", "\u742c\u7389", "\u6218\u58eba", "\u6218\u58ebb", "\u6e21\u8fba", "\u6e21\u90e8", "\u963f\u4f1f", "\u6587\u749f", "\u6587\u6e0a", "\u97e6\u5c14\u7eb3", "\u738b\u6273\u624b", "\u6b66\u6c9b", "\u6653\u98de", "\u8f9b\u7a0b", "\u661f\u706b", "\u661f\u7a00", "\u8f9b\u79c0", "\u79c0\u534e", "\u963f\u65ed", 
"\u5f90\u5218\u5e08", "\u77e2\u90e8", "\u516b\u6728", "\u5c71\u4e0a", "\u963f\u9633", "\u989c\u7b11", "\u5eb7\u660e", "\u6cf0\u4e45", "\u5b89\u6b66", "\u77e2\u7530\u5e78\u559c", "\u77e2\u7530\u8f9b\u559c", "\u4e49\u575a", "\u83ba\u513f", "\u76c8\u4e30", "\u5b9c\u5e74", "\u94f6\u674f", "\u9038\u8f69", "\u6a2a\u5c71", "\u6c38\u8d35", "\u6c38\u4e1a", "\u5609\u4e45", "\u5409\u5ddd", "\u4e49\u9ad8", "\u7528\u9ad8", "\u9633\u592a", "\u5143\u84c9", "\u73a5\u8f89", "\u6bd3\u534e", "\u6709\u9999", "\u5e78\u4e5f", "\u7531\u771f", "\u7ed3\u83dc", "\u97f5\u5b81", "\u767e\u5408", "\u767e\u5408\u534e", "\u5c24\u82cf\u6ce2\u592b", "\u88d5\u5b50", "\u60a0\u7b56", "\u60a0\u4e5f", "\u4e8e\u5ae3", "\u67da\u5b50", "\u8001\u90d1", "\u6b63\u8302", "\u5fd7\u6210", "\u82b7\u5de7", "\u77e5\u6613", "\u652f\u652f", "\u5468\u826f", "\u73e0\u51fd", "\u795d\u660e", "\u795d\u6d9b"],
54
+ "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "]
55
+ }
Model/hubert-soft-0d54a1f4.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e82e7d079df05fe3aa535f6f7d42d309bdae1d2a53324e2b2386c56721f4f649
3
+ size 378435957
Model/louise/360_epochs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20b38cc55191ec02c2809e80d758ff0d56bd44760841704feb9921aa58a4d9de
3
+ size 203264375
Model/louise/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "segment_size": 8192
4
+ },
5
+ "data": {
6
+ "max_wav_value": 32768.0,
7
+ "sampling_rate": 22050,
8
+ "filter_length": 1024,
9
+ "hop_length": 256,
10
+ "win_length": 1024,
11
+ "add_blank": true,
12
+ "n_speakers": 0
13
+ },
14
+ "model": {
15
+ "inter_channels": 192,
16
+ "hidden_channels": 256,
17
+ "filter_channels": 768,
18
+ "n_heads": 2,
19
+ "n_layers": 6,
20
+ "kernel_size": 3,
21
+ "p_dropout": 0.1,
22
+ "resblock": "1",
23
+ "resblock_kernel_sizes": [3,7,11],
24
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
25
+ "upsample_rates": [8,8,2,2],
26
+ "upsample_initial_channel": 512,
27
+ "upsample_kernel_sizes": [16,16,4,4],
28
+ "n_layers_q": 3,
29
+ "use_spectral_norm": false
30
+ },
31
+ "speakers": ["\u30eb\u30a4\u30ba"]
32
+ }
Model/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b6e449686c0db86f5c607b8c9fa1d87468c27198a1f0a20280c4e258239763d
3
+ size 661423381
Model/model.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ $audonnx.core.model.Model==0.3.2:
2
+ path: model.onnx
3
+ labels:
4
+ logits:
5
+ - arousal
6
+ - dominance
7
+ - valence
8
+ transform: null
Model/npy/all_emotions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48e81667f1fc4ce2b2eaed80fadd0871e1ddfc8933767915954c39ac854d5724
3
+ size 22356096
Model/paimon/paimon6k.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 1000,
5
+ "seed": 1234,
6
+ "epochs": 40000,
7
+ "learning_rate": 2e-4,
8
+ "betas": [0.8, 0.99],
9
+ "eps": 1e-9,
10
+ "batch_size": 46,
11
+ "fp16_run": true,
12
+ "lr_decay": 0.999875,
13
+ "segment_size": 8192,
14
+ "init_lr_ratio": 1,
15
+ "warmup_epochs": 0,
16
+ "c_mel": 45,
17
+ "c_kl": 1.0
18
+ },
19
+ "data": {
20
+ "training_files":"filelists/paimon_6k_train_chs.txt.cleaned",
21
+ "validation_files":"filelists/paimon_6k_val_chs.txt.cleaned",
22
+ "text_cleaners":["chinese_cleaners"],
23
+ "max_wav_value": 32768.0,
24
+ "sampling_rate": 48000,
25
+ "filter_length": 1024,
26
+ "hop_length": 256,
27
+ "win_length": 1024,
28
+ "n_mel_channels": 80,
29
+ "mel_fmin": 0.0,
30
+ "mel_fmax": null,
31
+ "add_blank": true,
32
+ "n_speakers": 0,
33
+ "cleaned_text": true
34
+ },
35
+ "model": {
36
+ "inter_channels": 192,
37
+ "hidden_channels": 192,
38
+ "filter_channels": 768,
39
+ "n_heads": 2,
40
+ "n_layers": 6,
41
+ "kernel_size": 3,
42
+ "p_dropout": 0.1,
43
+ "resblock": "1",
44
+ "resblock_kernel_sizes": [3,7,11],
45
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
46
+ "upsample_rates": [8,8,2,2],
47
+ "upsample_initial_channel": 512,
48
+ "upsample_kernel_sizes": [16,16,4,4],
49
+ "n_layers_q": 3,
50
+ "use_spectral_norm": false,
51
+ "gin_channels": 256
52
+ },
53
+ "speakers": ["paimon"],
54
+ "symbols": ["_", "\uff0c", "\u3002", "\uff01", "\uff1f", "\u2014", "\u2026", "\u300c", "\u300d", "\u3105", "\u3106", "\u3107", "\u3108", "\u3109", "\u310a", "\u310b", "\u310c", "\u310d", "\u310e", "\u310f", "\u3110", "\u3111", "\u3112", "\u3113", "\u3114", "\u3115", "\u3116", "\u3117", "\u3118", "\u3119", "\u311a", "\u311b", "\u311c", "\u311d", "\u311e", "\u311f", "\u3120", "\u3121", "\u3122", "\u3123", "\u3124", "\u3125", "\u3126", "\u3127", "\u3128", "\u3129", "\u02c9", "\u02ca", "\u02c7", "\u02cb", "\u02d9", " "]
55
+ }
Model/paimon/paimon6k_390000.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b282f7fed04b92186ae8ed1956496e4492d1ee07d84870143c6e7fd3b82d5cb6
3
+ size 449983387
Model/shanghainese/2796_epochs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:750299355c3cd6bec4bca61ac50dbfb4c1e129be9b0806442cee24071bed657b
3
+ size 158882637
Model/shanghainese/config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "segment_size": 8192
4
+ },
5
+ "data": {
6
+ "text_cleaners":["shanghainese_cleaners"],
7
+ "max_wav_value": 32768.0,
8
+ "sampling_rate": 22050,
9
+ "filter_length": 1024,
10
+ "hop_length": 256,
11
+ "win_length": 1024,
12
+ "add_blank": true,
13
+ "n_speakers": 2
14
+ },
15
+ "model": {
16
+ "inter_channels": 192,
17
+ "hidden_channels": 192,
18
+ "filter_channels": 768,
19
+ "n_heads": 2,
20
+ "n_layers": 6,
21
+ "kernel_size": 3,
22
+ "p_dropout": 0.1,
23
+ "resblock": "1",
24
+ "resblock_kernel_sizes": [3,7,11],
25
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
26
+ "upsample_rates": [8,8,2,2],
27
+ "upsample_initial_channel": 512,
28
+ "upsample_kernel_sizes": [16,16,4,4],
29
+ "n_layers_q": 3,
30
+ "use_spectral_norm": false,
31
+ "gin_channels": 256
32
+ },
33
+ "speakers": ["\u4e0a\u6d77\u8bdd(shanghainese)"],
34
+ "symbols": ["_", ",", ".", "!", "?", "\u2026", "a", "b", "d", "f", "g", "h", "i", "k", "l", "m", "n", "o", "p", "s", "t", "u", "v", "y", "z", "\u00f8", "\u014b", "\u0235", "\u0251", "\u0254", "\u0255", "\u0259", "\u0264", "\u0266", "\u026a", "\u027f", "\u0291", "\u0294", "\u02b0", "\u0303", "\u0329", "\u1d00", "\u1d07", "1", "5", "6", "7", "8", " "]
35
+ }
Model/vctk/pretrained_vctk.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab981615c443d935fc3a89b08137df544a1175bad99bcbbc9f59e7c3d4930043
3
+ size 159123481
Model/vctk/vctk_base.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 1000,
5
+ "seed": 1234,
6
+ "epochs": 10000,
7
+ "learning_rate": 2e-4,
8
+ "betas": [0.8, 0.99],
9
+ "eps": 1e-9,
10
+ "batch_size": 64,
11
+ "fp16_run": true,
12
+ "lr_decay": 0.999875,
13
+ "segment_size": 8192,
14
+ "init_lr_ratio": 1,
15
+ "warmup_epochs": 0,
16
+ "c_mel": 45,
17
+ "c_kl": 1.0
18
+ },
19
+ "data": {
20
+ "training_files":"filelists/vctk_audio_sid_text_train_filelist.txt.cleaned",
21
+ "validation_files":"filelists/vctk_audio_sid_text_val_filelist.txt.cleaned",
22
+ "text_cleaners":["english_cleaners2"],
23
+ "max_wav_value": 32768.0,
24
+ "sampling_rate": 22050,
25
+ "filter_length": 1024,
26
+ "hop_length": 256,
27
+ "win_length": 1024,
28
+ "n_mel_channels": 80,
29
+ "mel_fmin": 0.0,
30
+ "mel_fmax": null,
31
+ "add_blank": true,
32
+ "n_speakers": 109,
33
+ "cleaned_text": true
34
+ },
35
+ "model": {
36
+ "inter_channels": 192,
37
+ "hidden_channels": 192,
38
+ "filter_channels": 768,
39
+ "n_heads": 2,
40
+ "n_layers": 6,
41
+ "kernel_size": 3,
42
+ "p_dropout": 0.1,
43
+ "resblock": "1",
44
+ "resblock_kernel_sizes": [3,7,11],
45
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
46
+ "upsample_rates": [8,8,2,2],
47
+ "upsample_initial_channel": 512,
48
+ "upsample_kernel_sizes": [16,16,4,4],
49
+ "n_layers_q": 3,
50
+ "use_spectral_norm": false,
51
+ "gin_channels": 256
52
+ },
53
+ "speakers": ["vctk0(english)","vctk1(english)","vctk2(english)","vctk3(english)","vctk4(english)","vctk5(english)","vctk6(english)","vctk7(english)","vctk8(english)","vctk9(english)","vctk10(english)","vctk11(english)","vctk12(english)","vctk13(english)","vctk14(english)","vctk15(english)","vctk16(english)","vctk17(english)","vctk18(english)","vctk19(english)"],
54
+ "symbols": ["_", ";", ":", ",", ".", "!", "?", "¡", "¿", "—", "…", "\"", "«", "»", "“", "”", " ", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "ɑ", "ɐ", "ɒ", "æ", "ɓ", "ʙ", "β", "ɔ", "ɕ", "ç", "ɗ", "ɖ", "ð", "ʤ", "ə", "ɘ", "ɚ", "ɛ", "ɜ", "ɝ", "ɞ", "ɟ", "ʄ", "ɡ", "ɠ", "ɢ", "ʛ", "ɦ", "ɧ", "ħ", "ɥ", "ʜ", "ɨ", "ɪ", "ʝ", "ɭ", "ɬ", "ɫ", "ɮ", "ʟ", "ɱ", "ɯ", "ɰ", "ŋ", "ɳ", "ɲ", "ɴ", "ø", "ɵ", "ɸ", "θ", "œ", "ɶ", "ʘ", "ɹ", "ɺ", "ɾ", "ɻ", "ʀ", "ʁ", "ɽ", "ʂ", "ʃ", "ʈ", "ʧ", "ʉ", "ʊ", "ʋ", "ⱱ", "ʌ", "ɣ", "ɤ", "ʍ", "χ", "ʎ", "ʏ", "ʑ", "ʐ", "ʒ", "ʔ", "ʡ", "ʕ", "ʢ", "ǀ", "ǁ", "ǂ", "ǃ", "ˈ", "ˌ", "ː", "ˑ", "ʼ", "ʴ", "ʰ", "ʱ", "ʲ", "ʷ", "ˠ", "ˤ", "˞", "↓", "↑", "→", "↗", "↘", "'", "̩", "'", "ᵻ"]
55
+ }
Model/vits_chinese/bert_vits.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 100,
4
+ "eval_interval": 10000,
5
+ "seed": 1234,
6
+ "epochs": 20000,
7
+ "learning_rate": 1e-4,
8
+ "betas": [0.8, 0.99],
9
+ "eps": 1e-9,
10
+ "batch_size": 8,
11
+ "fp16_run": false,
12
+ "lr_decay": 0.999875,
13
+ "segment_size": 12800,
14
+ "init_lr_ratio": 1,
15
+ "warmup_epochs": 0,
16
+ "c_mel": 45,
17
+ "c_kl": 1.0
18
+ },
19
+ "data": {
20
+ "training_files":"filelists/train.txt",
21
+ "validation_files":"filelists/valid.txt",
22
+ "text_cleaners":["bert_chinese_cleaners"],
23
+ "max_wav_value": 32768.0,
24
+ "sampling_rate": 16000,
25
+ "filter_length": 1024,
26
+ "hop_length": 256,
27
+ "win_length": 1024,
28
+ "n_mel_channels": 80,
29
+ "mel_fmin": 0.0,
30
+ "mel_fmax": null,
31
+ "add_blank": false,
32
+ "n_speakers": 0
33
+ },
34
+ "model": {
35
+ "inter_channels": 192,
36
+ "hidden_channels": 192,
37
+ "filter_channels": 768,
38
+ "n_heads": 2,
39
+ "n_layers": 6,
40
+ "kernel_size": 3,
41
+ "p_dropout": 0.1,
42
+ "resblock": "1",
43
+ "resblock_kernel_sizes": [3,7,11],
44
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
45
+ "upsample_rates": [8,8,2,2],
46
+ "upsample_initial_channel": 512,
47
+ "upsample_kernel_sizes": [16,16,4,4],
48
+ "n_layers_q": 3,
49
+ "use_spectral_norm": false,
50
+ "use_sdp": false,
51
+ "bert_embedding": true
52
+ },
53
+ "speakers": ["bert"],
54
+ "symbols": ["sil", "eos", "sp", "#0", "#1", "#2", "#3", "^", "b", "c", "ch", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "q", "r", "s", "sh", "t", "x", "z", "zh", "a1", "a2", "a3", "a4", "a5", "ai1", "ai2", "ai3", "ai4", "ai5", "an1", "an2", "an3", "an4", "an5", "ang1", "ang2", "ang3", "ang4", "ang5", "ao1", "ao2", "ao3", "ao4", "ao5", "e1", "e2", "e3", "e4", "e5", "ei1", "ei2", "ei3", "ei4", "ei5", "en1", "en2", "en3", "en4", "en5", "eng1", "eng2", "eng3", "eng4", "eng5", "er1", "er2", "er3", "er4", "er5", "i1", "i2", "i3", "i4", "i5", "ia1", "ia2", "ia3", "ia4", "ia5", "ian1", "ian2", "ian3", "ian4", "ian5", "iang1", "iang2", "iang3", "iang4", "iang5", "iao1", "iao2", "iao3", "iao4", "iao5", "ie1", "ie2", "ie3", "ie4", "ie5", "ii1", "ii2", "ii3", "ii4", "ii5", "iii1", "iii2", "iii3", "iii4", "iii5", "in1", "in2", "in3", "in4", "in5", "ing1", "ing2", "ing3", "ing4", "ing5", "iong1", "iong2", "iong3", "iong4", "iong5", "iou1", "iou2", "iou3", "iou4", "iou5", "o1", "o2", "o3", "o4", "o5", "ong1", "ong2", "ong3", "ong4", "ong5", "ou1", "ou2", "ou3", "ou4", "ou5", "u1", "u2", "u3", "u4", "u5", "ua1", "ua2", "ua3", "ua4", "ua5", "uai1", "uai2", "uai3", "uai4", "uai5", "uan1", "uan2", "uan3", "uan4", "uan5", "uang1", "uang2", "uang3", "uang4", "uang5", "uei1", "uei2", "uei3", "uei4", "uei5", "uen1", "uen2", "uen3", "uen4", "uen5", "ueng1", "ueng2", "ueng3", "ueng4", "ueng5", "uo1", "uo2", "uo3", "uo4", "uo5", "v1", "v2", "v3", "v4", "v5", "van1", "van2", "van3", "van4", "van5", "ve1", "ve2", "ve3", "ve4", "ve5", "vn1", "vn2", "vn3", "vn4", "vn5"]
55
+ }
Model/vits_chinese/vits_bert_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be0dcf53ffcd49d51fd9a710338a9ff7eed60d0c26ccbb03ebd5a9175f20dc39
3
+ size 141822877
Model/w2v2-vits/1026_epochs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f61e221e36af355dba89f20f70215d3a93dbe9fd497172ce46c950f757ccce0
3
+ size 159675849
Model/w2v2-vits/config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "segment_size": 8192
4
+ },
5
+ "data": {
6
+ "text_cleaners":["zh_ja_mixture_cleaners"],
7
+ "max_wav_value": 32768.0,
8
+ "sampling_rate": 22050,
9
+ "filter_length": 1024,
10
+ "hop_length": 256,
11
+ "win_length": 1024,
12
+ "add_blank": true,
13
+ "n_speakers": 5,
14
+ "emotion_embedding": true
15
+ },
16
+ "model": {
17
+ "inter_channels": 192,
18
+ "hidden_channels": 192,
19
+ "filter_channels": 768,
20
+ "n_heads": 2,
21
+ "n_layers": 6,
22
+ "kernel_size": 3,
23
+ "p_dropout": 0.1,
24
+ "resblock": "1",
25
+ "resblock_kernel_sizes": [3,7,11],
26
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
27
+ "upsample_rates": [8,8,2,2],
28
+ "upsample_initial_channel": 512,
29
+ "upsample_kernel_sizes": [16,16,4,4],
30
+ "n_layers_q": 3,
31
+ "use_spectral_norm": false,
32
+ "gin_channels": 256
33
+ },
34
+ "speakers": ["\u7dbe\u5730\u5be7\u3005", "\u5728\u539f\u4e03\u6d77", "\u5c0f\u8338", "\u5510\u4e50\u541f"],
35
+ "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "]
36
+ }
README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ title: vits-simple-api
4
+ sdk: gradio
5
+ python_version: 3.10.11
6
+ emoji: 👀
7
+ app_file: app.py
8
+ duplicated_from: Artrajz/vits-simple-api
9
+ ---
README_zh.md ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div class="title" align=center>
2
+ <h1>vits-simple-api</h1>
3
+ <div>Simply call the vits api</div>
4
+ <br/>
5
+ <br/>
6
+ <p>
7
+ <img src="https://img.shields.io/github/license/Artrajz/vits-simple-api">
8
+ <img src="https://img.shields.io/badge/python-3.9%7C3.10-green">
9
+ <a href="https://hub.docker.com/r/artrajz/vits-simple-api">
10
+ <img src="https://img.shields.io/docker/pulls/artrajz/vits-simple-api"></a>
11
+ </p>
12
+ <a href="https://github.com/Artrajz/vits-simple-api/blob/main/README.md">English</a>|<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README_zh.md">中文文档</a>
13
+ <br/>
14
+ </div>
15
+
16
+
17
+
18
+
19
+ # Feature
20
+
21
+ - [x] VITS语音合成
22
+ - [x] VITS语音转换
23
+ - [x] HuBert-soft VITS模型
24
+ - [x] W2V2 VITS / emotional-vits维度情感模型
25
+ - [x] 加载多模型
26
+ - [x] 自动识别语言并处理,根据模型的cleaner设置语言类型识别的范围,支持自定义语言类型范围
27
+ - [x] 自定义默认参数
28
+ - [x] 长文本批处理
29
+ - [x] GPU加速推理
30
+ - [x] SSML语音合成标记语言(完善中...)
31
+
32
+ <details><summary>Update Logs</summary><pre><code>
33
+ <h2>2023.6.5</h2>
34
+ <p>更换音频编码使用的库,增加flac格式,增加中文对读简单数学公式的支持</p>
35
+ <h2>2023.5.24</h2>
36
+ <p>添加dimensional_emotion api,从文件夹加载多个npy文件,Docker添加了Linux/ARM64和Linux/ARM64/v8平台</p>
37
+ <h2>2023.5.15</h2>
38
+ <p>增加english_cleaner,需要额外安装espeak才能使用</p>
39
+ <h2>2023.5.12</h2>
40
+ <p>增加ssml支持,但仍需完善。重构部分功能,hubert_vits中的speaker_id改为id</p>
41
+ <h2>2023.5.2</h2>
42
+ <p>增加w2v2-vits/emotional-vits模型支持,修改了speakers映射表并添加了对应模型支持的语言</p>
43
+ <h2>2023.4.23</h2>
44
+ <p>增加api key鉴权,默认禁用,需要在config.py中启用</p>
45
+ <h2>2023.4.17</h2>
46
+ <p>修改单语言的cleaner需要标注才会clean,增加GPU加速推理,但需要手动安装gpu推理环境</p>
47
+ <h2>2023.4.12</h2>
48
+ <p>项目由MoeGoe-Simple-API更名为vits-simple-api,支持长文本批处理,增加长文本分段阈值max</p>
49
+ <h2>2023.4.7</h2>
50
+ <p>增加配置文件可自定义默认参数,本次更新需要手动更新config.py,具体使用方法见config.py</p>
51
+ <h2>2023.4.6</h2>
52
+ <p>加入自动识别语种选项auto,lang参数默认修改为auto,自动识别仍有一定缺陷,请自行选择</p>
53
+ <p>统一POST请求类型为multipart/form-data</p>
54
+ </code></pre></details>
55
+
56
+
57
+
58
+ ## demo
59
+
60
+ [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Artrajz/vits-simple-api)
61
+
62
+ 注意不同的id支持的语言可能有所不同。[speakers](https://artrajz-vits-simple-api.hf.space/voice/speakers)
63
+
64
+
65
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164`
66
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=你知道1+1=几吗?我觉得1+1≠3&id=164&lang=zh`
67
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=Difficult the first time, easy the second.&id=4`
68
+ - 激动:`https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=111`
69
+ - 小声:`https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=2077`
70
+
71
+ https://user-images.githubusercontent.com/73542220/237995061-c1f25b4e-dd86-438a-9363-4bb1fe65b425.mov
72
+
73
+ # 部署
74
+
75
+ ## Docker部署
76
+
77
+ ### 镜像拉取脚本
78
+
79
+ ```
80
+ bash -c "$(wget -O- https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/vits-simple-api-installer-latest.sh)"
81
+ ```
82
+
83
+ - 目前docker镜像支持的平台`linux/amd64,linux/arm64`
84
+ - 在拉取完成后,需要导入VITS模型才能使用,请根据以下步骤导入模型。
85
+
86
+ ### 下载VITS模型
87
+
88
+ 将模型放入`/usr/local/vits-simple-api/Model`
89
+
90
+ <details><summary>Folder structure</summary><pre><code>
91
+ │ hubert-soft-0d54a1f4.pt
92
+ │ model.onnx
93
+ │ model.yaml
94
+ ├─g
95
+ │ config.json
96
+ │ G_953000.pth
97
+
98
+ ├─louise
99
+ │ 360_epochs.pth
100
+ │ config.json
101
+
102
+ ├─Nene_Nanami_Rong_Tang
103
+ │ 1374_epochs.pth
104
+ │ config.json
105
+
106
+ ├─Zero_no_tsukaima
107
+ │ 1158_epochs.pth
108
+ │ config.json
109
+
110
+ └─npy
111
+ 25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
112
+ all_emotions.npy
113
+ </code></pre></details>
114
+
115
+
116
+
117
+ ### 修改模型路径
118
+
119
+ Modify in `/usr/local/vits-simple-api/config.py`
120
+
121
+ <details><summary>config.py</summary><pre><code>
122
+ # 在此填写模型路径
123
+ MODEL_LIST = [
124
+ # VITS
125
+ [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
126
+ [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
127
+ [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
128
+ # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
129
+ [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
130
+ # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
131
+ [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
132
+ ]
133
+ # hubert-vits: hubert soft 编码器
134
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
135
+ # w2v2-vits: Dimensional emotion npy file
136
+ # 加载单独的npy: ABS_PATH+"/all_emotions.npy
137
+ # 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
138
+ # 从文件夹里加载npy: ABS_PATH + "/Model/npy"
139
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
140
+ # w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
141
+ DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
142
+ </code></pre></details>
143
+
144
+
145
+
146
+ ### 启动
147
+
148
+ `docker compose up -d`
149
+
150
+ 或者重新执行拉取脚本
151
+
152
+ ### 镜像更新
153
+
154
+ 重新执行docker镜像拉取脚本即可
155
+
156
+ ## 虚拟环境部署
157
+
158
+ ### Clone
159
+
160
+ `git clone https://github.com/Artrajz/vits-simple-api.git`
161
+
162
+ ### 下载python依赖
163
+
164
+ 推荐使用python的虚拟环境,python版本 >= 3.9
165
+
166
+ `pip install -r requirements.txt`
167
+
168
+ windows下可能安装不了fasttext,可以用以下命令安装,附[wheels下载地址](https://www.lfd.uci.edu/~gohlke/pythonlibs/#fasttext)
169
+
170
+ ```
171
+ #python3.10 win_amd64
172
+ pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp310-cp310-win_amd64.whl
173
+ #python3.9 win_amd64
174
+ pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp39-cp39-win_amd64.whl
175
+ ```
176
+
177
+ ### 下载VITS模型
178
+
179
+ 将模型放入 `/path/to/vits-simple-api/Model`
180
+
181
+ <details><summary>文件夹结构</summary><pre><code>
182
+ ├─g
183
+ │ config.json
184
+ │ G_953000.pth
185
+
186
+ ├─louise
187
+ │ 360_epochs.pth
188
+ │ config.json
189
+ │ hubert-soft-0d54a1f4.pt
190
+
191
+ ├─Nene_Nanami_Rong_Tang
192
+ │ 1374_epochs.pth
193
+ │ config.json
194
+
195
+ └─Zero_no_tsukaima
196
+ 1158_epochs.pth
197
+ config.json
198
+ </code></pre></details>
199
+
200
+ ### 修改模型路径
201
+
202
+ 在 `/path/to/vits-simple-api/config.py` 修改
203
+
204
+ <details><summary>config.py</summary><pre><code>
205
+ # 在此填写模型路径
206
+ MODEL_LIST = [
207
+ # VITS
208
+ [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
209
+ [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
210
+ [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
211
+ # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
212
+ [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
213
+ # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
214
+ [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
215
+ ]
216
+ # hubert-vits: hubert soft 编码器
217
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
218
+ # w2v2-vits: Dimensional emotion npy file
219
+ # 加载单独的npy: ABS_PATH+"/all_emotions.npy
220
+ # 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
221
+ # 从文件夹里加载npy: ABS_PATH + "/Model/npy"
222
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
223
+ # w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
224
+ DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
225
+ </code></pre></details>
226
+
227
+
228
+
229
+ ### 启动
230
+
231
+ `python app.py`
232
+
233
+ # GPU 加速
234
+
235
+ ## windows
236
+
237
+ ### 安装CUDA
238
+
239
+ 查看显卡最高支持CUDA的版本
240
+
241
+ ```
242
+ nvidia-smi
243
+ ```
244
+
245
+ 以CUDA11.7为例,[官网](https://developer.nvidia.com/cuda-11-7-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local)
246
+
247
+ ### 安装GPU版pytorch
248
+
249
+ CUDA11.7对应的pytorch是用这个命令安装
250
+
251
+ ```
252
+ pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
253
+ ```
254
+
255
+ 对应版本的命令可以在[官网](https://pytorch.org/get-started/locally/)找到
256
+
257
+ ## Linux
258
+
259
+ 安装过程类似,但我没有相应的环境所以没办法测试
260
+
261
+ # Openjtalk安装问题
262
+
263
+ 如果你是arm64架构的平台,由于pypi官网上没有arm64对应的whl,可能安装会出现一些问题,你可以使用我构建的whl来安装
264
+
265
+ ```
266
+ pip install openjtalk==0.3.0.dev2 --index-url https://pypi.artrajz.cn/simple
267
+ ```
268
+
269
+ 或者是自己手动构建一个whl,可以根据[教程](https://artrajz.cn/index.php/archives/167/)来构建
270
+
271
+ # API
272
+
273
+ ## GET
274
+
275
+ #### speakers list
276
+
277
+ - GET http://127.0.0.1:23456/voice/speakers
278
+
279
+ 返回id对应角色的映射表
280
+
281
+ #### voice vits
282
+
283
+ - GET http://127.0.0.1:23456/voice/vits?text=text
284
+
285
+ 其他参数不指定时均为默认值
286
+
287
+ - GET http://127.0.0.1:23456/voice/vits?text=[ZH]text[ZH][JA]text[JA]&lang=mix
288
+
289
+ lang=mix时文本要标注
290
+
291
+ - GET http://127.0.0.1:23456/voice/vits?text=text&id=142&format=wav&lang=zh&length=1.4
292
+
293
+ 文本为text,角色id为142,音频格式为wav,文本语言为zh,语音长度为1.4,其余参数默认
294
+
295
+ #### check
296
+
297
+ - GET http://127.0.0.1:23456/voice/check?id=0&model=vits
298
+
299
+ ## POST
300
+
301
+ - python
302
+
303
+ ```python
304
+ import re
305
+ import requests
306
+ import os
307
+ import random
308
+ import string
309
+ from requests_toolbelt.multipart.encoder import MultipartEncoder
310
+
311
+ abs_path = os.path.dirname(__file__)
312
+ base = "http://127.0.0.1:23456"
313
+
314
+
315
+ # 映射表
316
+ def voice_speakers():
317
+ url = f"{base}/voice/speakers"
318
+
319
+ res = requests.post(url=url)
320
+ json = res.json()
321
+ for i in json:
322
+ print(i)
323
+ for j in json[i]:
324
+ print(j)
325
+ return json
326
+
327
+
328
+ # 语音合成 voice vits
329
+ def voice_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50):
330
+ fields = {
331
+ "text": text,
332
+ "id": str(id),
333
+ "format": format,
334
+ "lang": lang,
335
+ "length": str(length),
336
+ "noise": str(noise),
337
+ "noisew": str(noisew),
338
+ "max": str(max)
339
+ }
340
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
341
+
342
+ m = MultipartEncoder(fields=fields, boundary=boundary)
343
+ headers = {"Content-Type": m.content_type}
344
+ url = f"{base}/voice"
345
+
346
+ res = requests.post(url=url, data=m, headers=headers)
347
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
348
+ path = f"{abs_path}/{fname}"
349
+
350
+ with open(path, "wb") as f:
351
+ f.write(res.content)
352
+ print(path)
353
+ return path
354
+
355
+
356
+ # 语音转换 hubert-vits
357
+ def voice_hubert_vits(upload_path, id, format="wav", length=1, noise=0.667, noisew=0.8):
358
+ upload_name = os.path.basename(upload_path)
359
+ upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
360
+
361
+ with open(upload_path, 'rb') as upload_file:
362
+ fields = {
363
+ "upload": (upload_name, upload_file, upload_type),
364
+ "id": str(id),
365
+ "format": format,
366
+ "length": str(length),
367
+ "noise": str(noise),
368
+ "noisew": str(noisew),
369
+ }
370
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
371
+
372
+ m = MultipartEncoder(fields=fields, boundary=boundary)
373
+ headers = {"Content-Type": m.content_type}
374
+ url = f"{base}/voice/hubert-vits"
375
+
376
+ res = requests.post(url=url, data=m, headers=headers)
377
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
378
+ path = f"{abs_path}/{fname}"
379
+
380
+ with open(path, "wb") as f:
381
+ f.write(res.content)
382
+ print(path)
383
+ return path
384
+
385
+
386
+ # 维度情感模型 w2v2-vits
387
+ def voice_w2v2_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, emotion=0):
388
+ fields = {
389
+ "text": text,
390
+ "id": str(id),
391
+ "format": format,
392
+ "lang": lang,
393
+ "length": str(length),
394
+ "noise": str(noise),
395
+ "noisew": str(noisew),
396
+ "max": str(max),
397
+ "emotion": str(emotion)
398
+ }
399
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
400
+
401
+ m = MultipartEncoder(fields=fields, boundary=boundary)
402
+ headers = {"Content-Type": m.content_type}
403
+ url = f"{base}/voice/w2v2-vits"
404
+
405
+ res = requests.post(url=url, data=m, headers=headers)
406
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
407
+ path = f"{abs_path}/{fname}"
408
+
409
+ with open(path, "wb") as f:
410
+ f.write(res.content)
411
+ print(path)
412
+ return path
413
+
414
+
415
+ # 语音转换 同VITS模型内角色之间的音色转换
416
+ def voice_conversion(upload_path, original_id, target_id):
417
+ upload_name = os.path.basename(upload_path)
418
+ upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
419
+
420
+ with open(upload_path, 'rb') as upload_file:
421
+ fields = {
422
+ "upload": (upload_name, upload_file, upload_type),
423
+ "original_id": str(original_id),
424
+ "target_id": str(target_id),
425
+ }
426
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
427
+ m = MultipartEncoder(fields=fields, boundary=boundary)
428
+
429
+ headers = {"Content-Type": m.content_type}
430
+ url = f"{base}/voice/conversion"
431
+
432
+ res = requests.post(url=url, data=m, headers=headers)
433
+
434
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
435
+ path = f"{abs_path}/{fname}"
436
+
437
+ with open(path, "wb") as f:
438
+ f.write(res.content)
439
+ print(path)
440
+ return path
441
+
442
+
443
+ def voice_ssml(ssml):
444
+ fields = {
445
+ "ssml": ssml,
446
+ }
447
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
448
+
449
+ m = MultipartEncoder(fields=fields, boundary=boundary)
450
+ headers = {"Content-Type": m.content_type}
451
+ url = f"{base}/voice/ssml"
452
+
453
+ res = requests.post(url=url, data=m, headers=headers)
454
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
455
+ path = f"{abs_path}/{fname}"
456
+
457
+ with open(path, "wb") as f:
458
+ f.write(res.content)
459
+ print(path)
460
+ return path
461
+
462
+ def voice_dimensional_emotion(upload_path):
463
+ upload_name = os.path.basename(upload_path)
464
+ upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
465
+
466
+ with open(upload_path, 'rb') as upload_file:
467
+ fields = {
468
+ "upload": (upload_name, upload_file, upload_type),
469
+ }
470
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
471
+
472
+ m = MultipartEncoder(fields=fields, boundary=boundary)
473
+ headers = {"Content-Type": m.content_type}
474
+ url = f"{base}/voice/dimension-emotion"
475
+
476
+ res = requests.post(url=url, data=m, headers=headers)
477
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
478
+ path = f"{abs_path}/{fname}"
479
+
480
+ with open(path, "wb") as f:
481
+ f.write(res.content)
482
+ print(path)
483
+ return path
484
+ ```
485
+
486
+ ## API KEY
487
+
488
+ 在config.py中设置`API_KEY_ENABLED = True`以启用,api key填写:`API_KEY = "api-key"`。
489
+
490
+ 启用后,GET请求中使用需要增加参数api_key,POST请求中使用需要在header中添加参数`X-API-KEY`。
491
+
492
+ # Parameter
493
+
494
+ ## VITS语音合成
495
+
496
+ | Name | Parameter | Is must | Default | Type | Instruction |
497
+ | ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
498
+ | 合成文本 | text | true | | str | |
499
+ | 角色id | id | false | 0 | int | |
500
+ | 音频格式 | format | false | wav | str | 支持wav,ogg,silk,mp3,flac |
501
+ | 文本语言 | lang | false | auto | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
502
+ | 语音长度/语速 | length | false | 1.0 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
503
+ | 噪声 | noise | false | 0.667 | float | |
504
+ | 噪声偏差 | noisew | false | 0.8 | float | |
505
+ | 分段阈值 | max | false | 50 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
506
+
507
+ ## VITS 语音转换
508
+
509
+ | Name | Parameter | Is must | Default | Type | Instruction |
510
+ | ---------- | ----------- | ------- | ------- | ---- | ---------------------- |
511
+ | 上传音频 | upload | true | | file | wav or ogg |
512
+ | 源角色id | original_id | true | | int | 上传文件所使用的角色id |
513
+ | 目标角色id | target_id | true | | int | 要转换的目标角色id |
514
+
515
+ ## HuBert-VITS 语音转换
516
+
517
+ | Name | Parameter | Is must | Default | Type | Instruction |
518
+ | ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------ |
519
+ | 上传音频 | upload | true | | file | |
520
+ | 目标角色id | id | true | | int | |
521
+ | 音频格式 | format | true | | str | wav,ogg,silk |
522
+ | 语音长度/语速 | length | true | | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
523
+ | 噪声 | noise | true | | float | |
524
+ | 噪声偏差 | noisew | true | | float | |
525
+
526
+ ## Dimensional emotion
527
+
528
+ | Name | Parameter | Is must | Default | Type | Instruction |
529
+ | -------- | --------- | ------- | ------- | ---- | ----------------------------- |
530
+ | 上传音频 | upload | true | | file | 返回存储维度情感向量的npy文件 |
531
+
532
+ ## W2V2-VITS
533
+
534
+ | Name | Parameter | Is must | Default | Type | Instruction |
535
+ | ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
536
+ | 合成文本 | text | true | | str | |
537
+ | 角色id | id | false | 0 | int | |
538
+ | 音频格式 | format | false | wav | str | 支持wav,ogg,silk,mp3,flac |
539
+ | 文本语言 | lang | false | auto | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
540
+ | 语音长度/语速 | length | false | 1.0 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
541
+ | 噪声 | noise | false | 0.667 | float | |
542
+ | 噪声偏差 | noisew | false | 0.8 | float | |
543
+ | 分段阈值 | max | false | 50 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
544
+ | 维度情感 | emotion | false | 0 | int | 范围取决于npy情感参考文件,如[innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)的all_emotions.npy模型范围是0-5457 |
545
+
546
+ ## SSML语音合成标记语言
547
+ 目前支持的元素与属性
548
+
549
+ `speak`元素
550
+
551
+ | Attribute | Description | Is must |
552
+ | --------- | ------------------------------------------------------------ | ------- |
553
+ | id | 默认值从`config.py`中读取 | false |
554
+ | lang | 默认值从`config.py`中读取 | false |
555
+ | length | 默认值从`config.py`中读取 | false |
556
+ | noise | 默认值从`config.py`中读取 | false |
557
+ | noisew | 默认值从`config.py`中读取 | false |
558
+ | max | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段,这里默认为0。 | false |
559
+ | model | 默认为vits,可选`w2v2-vits`,`emotion-vits` | false |
560
+ | emotion | 只有用`w2v2-vits`或`emotion-vits`时`emotion`才生效,范围取决于npy情感参考文件 | false |
561
+
562
+ `voice`元素
563
+
564
+ 优先级大于`speak`
565
+
566
+ | Attribute | Description | Is must |
567
+ | --------- | ------------------------------------------------------------ | ------- |
568
+ | id | 默认值从`config.py`中读取 | false |
569
+ | lang | 默认值从`config.py`中读取 | false |
570
+ | length | 默认值从`config.py`中读取 | false |
571
+ | noise | 默认值从`config.py`中读取 | false |
572
+ | noisew | 默认值从`config.py`中读取 | false |
573
+ | max | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段,这里默认为0。 | false |
574
+ | model | 默认为vits,可选`w2v2-vits`,`emotion-vits` | false |
575
+ | emotion | 只有用`w2v2-vits`或`emotion-vits`时`emotion`才会生效 | false |
576
+
577
+ `break`元素
578
+
579
+ | Attribute | Description | Is must |
580
+ | --------- | ------------------------------------------------------------ | ------- |
581
+ | strength | x-weak,weak,medium(默认值),strong,x-strong | false |
582
+ | time | 暂停的绝对持续时间,以秒为单位(例如 `2s`)或以毫秒为单位(例如 `500ms`)。 有效值的范围为 0 到 5000 毫秒。 如果设置的值大于支持的最大值,则服务将使用 `5000ms`。 如果设置了 `time` 属性,则会忽略 `strength` 属性。 | false |
583
+
584
+ | Strength | Relative Duration |
585
+ | :------- | :---------------- |
586
+ | x-weak | 250 毫秒 |
587
+ | weak | 500 毫秒 |
588
+ | Medium | 750 毫秒 |
589
+ | Strong | 1000 毫秒 |
590
+ | x-strong | 1250 毫秒 |
591
+
592
+ 示例
593
+
594
+ ```xml
595
+ <speak lang="zh" format="mp3" length="1.2">
596
+ <voice id="92" >这几天心里颇不宁静。</voice>
597
+ <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
598
+ <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
599
+ <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
600
+ <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
601
+ <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
602
+ <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
603
+ <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
604
+ <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
605
+ <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
606
+ <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
607
+ <voice id="127">路上只我一个人,背着手踱着。</voice>
608
+ <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
609
+ <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
610
+ <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
611
+ <voice id="131">白天里��定要做的事,一定要说的话,现在都可不理。</voice>
612
+ <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
613
+ </speak>
614
+ ```
615
+
616
+ # 交流平台
617
+
618
+ 现在只有 [Q群](https://qm.qq.com/cgi-bin/qm/qr?k=-1GknIe4uXrkmbDKBGKa1aAUteq40qs_&jump_from=webapi&authKey=x5YYt6Dggs1ZqWxvZqvj3fV8VUnxRyXm5S5Kzntc78+Nv3iXOIawplGip9LWuNR/)
619
+
620
+ # 鸣谢
621
+
622
+ - vits:https://github.com/jaywalnut310/vits
623
+ - MoeGoe:https://github.com/CjangCjengh/MoeGoe
624
+ - emotional-vits:https://github.com/innnky/emotional-vits
625
+ - vits-uma-genshin-honkai:https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai
626
+
app.py ADDED
@@ -0,0 +1,474 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ import time
4
+ import logzero
5
+ import uuid
6
+ from flask import Flask, request, send_file, jsonify, make_response, render_template
7
+ from werkzeug.utils import secure_filename
8
+ from flask_apscheduler import APScheduler
9
+ from functools import wraps
10
+ from utils.utils import clean_folder, check_is_none
11
+ from utils.merge import merge_model
12
+ from io import BytesIO
13
+
14
# Flask application; all runtime settings are loaded from config.py.
app = Flask(__name__)
app.config.from_pyfile("config.py")

# Background scheduler for the periodic cleanup task defined at the bottom
# of this file; only started when cleaning is enabled (interval > 0).
scheduler = APScheduler()
scheduler.init_app(app)
if app.config.get("CLEAN_INTERVAL_SECONDS", 3600) > 0:
    scheduler.start()

# Logging: keep logzero quiet, take the root level from config.py
# (LOGGING_LEVEL), and silence chatty third-party loggers.
logzero.loglevel(logging.WARNING)
logger = logging.getLogger("vits-simple-api")
level = app.config.get("LOGGING_LEVEL", "DEBUG")
level_dict = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR,
              'CRITICAL': logging.CRITICAL}
logging.basicConfig(level=level_dict[level])
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger("langid.langid").setLevel(logging.INFO)
logging.getLogger("apscheduler.scheduler").setLevel(logging.INFO)

# Load every model listed in MODEL_LIST and merge them into a single
# TTS facade used by all route handlers below.
tts = merge_model(app.config["MODEL_LIST"])

# Ensure the upload and cache directories exist before serving requests.
if not os.path.exists(app.config['UPLOAD_FOLDER']):
    os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

if not os.path.exists(app.config['CACHE_PATH']):
    os.makedirs(app.config['CACHE_PATH'], exist_ok=True)
39
+
40
+
41
def require_api_key(func):
    """Decorator: reject the request with HTTP 401 unless a valid API key is
    supplied via the ``api_key`` query parameter or the ``X-API-KEY`` header.

    The check is skipped entirely when ``API_KEY_ENABLED`` is falsy in the
    app config, so unprotected deployments behave exactly as before.
    """

    @wraps(func)
    def check_api_key(*args, **kwargs):
        # Fast path: protection disabled -> call straight through.
        if not app.config.get('API_KEY_ENABLED', False):
            return func(*args, **kwargs)

        supplied = request.args.get('api_key') or request.headers.get('X-API-KEY')
        if supplied and supplied == app.config['API_KEY']:
            return func(*args, **kwargs)

        return make_response(jsonify({"status": "error", "message": "Invalid API Key"}), 401)

    return check_api_key
54
+
55
+
56
@app.route('/', methods=["GET", "POST"])
def index():
    """Render the demo landing page with the loaded speaker catalogue."""
    context = {
        "speakers": tts.voice_speakers,
        "speakers_count": tts.speakers_count,
    }
    return render_template("index.html", **context)
63
+
64
+
65
@app.route('/voice/speakers', methods=["GET", "POST"])
def voice_speakers_api():
    """Return the catalogue of loaded speakers, grouped by model type, as JSON."""
    speakers = tts.voice_speakers
    return jsonify(speakers)
68
+
69
+
70
@app.route('/voice', methods=["GET", "POST"])
@app.route('/voice/vits', methods=["GET", "POST"])
@require_api_key
def voice_vits_api():
    """VITS text-to-speech endpoint.

    Parameters arrive via query string (GET) or form/JSON body (POST):
    text, id, format, lang, length, noise, noisew, max, streaming.
    Missing values fall back to the defaults in config.py. Returns the
    synthesized audio as a file download, or as a streamed MP3 response
    when ``streaming`` is set.
    """
    # --- parameter parsing -------------------------------------------------
    try:
        if request.method == "GET":
            text = request.args.get("text", "")
            id = int(request.args.get("id", app.config.get("ID", 0)))
            format = request.args.get("format", app.config.get("FORMAT", "wav"))
            lang = request.args.get("lang", app.config.get("LANG", "auto"))
            length = float(request.args.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.args.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.args.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(request.args.get("max", app.config.get("MAX", 50)))
            # NOTE(review): type=bool applies bool() to the raw string, so any
            # non-empty value (even "false") enables streaming — confirm intent.
            use_streaming = request.args.get('streaming', False, type=bool)
        elif request.method == "POST":
            content_type = request.headers.get('Content-Type')
            if content_type == 'application/json':
                data = request.get_json()
            else:
                data = request.form
            text = data.get("text", "")
            id = int(data.get("id", app.config.get("ID", 0)))
            format = data.get("format", app.config.get("FORMAT", "wav"))
            lang = data.get("lang", app.config.get("LANG", "auto"))
            length = float(data.get("length", app.config.get("LENGTH", 1)))
            noise = float(data.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(data.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(data.get("max", app.config.get("MAX", 50)))
            # NOTE(review): reads request.form even for JSON bodies, so a JSON
            # "streaming" field is ignored — confirm whether this is intended.
            use_streaming = request.form.get('streaming', False, type=bool)
    except Exception as e:
        logger.error(f"[VITS] {e}")
        return make_response("parameter error", 400)

    logger.info(f"[VITS] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew}")
    logger.info(f"[VITS] len:{len(text)} text:{text}")

    # --- validation --------------------------------------------------------
    if check_is_none(text):
        logger.info(f"[VITS] text is empty")
        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)

    if check_is_none(id):
        logger.info(f"[VITS] speaker id is empty")
        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

    if id < 0 or id >= tts.vits_speakers_count:
        logger.info(f"[VITS] speaker id {id} does not exist")
        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)

    # Reject languages the selected speaker's model does not support.
    speaker_lang = tts.voice_speakers["VITS"][id].get('lang')
    if lang.upper() != "AUTO" and lang.upper() != "MIX" and len(speaker_lang) != 1 and lang not in speaker_lang:
        logger.info(f"[VITS] lang \"{lang}\" is not in {speaker_lang}")
        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)

    # If LANGUAGE_AUTOMATIC_DETECT is configured, it overrides the speaker's
    # language list for automatic language detection.
    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")

    # Streaming responses are only produced as MP3.
    if use_streaming and format.upper() != "MP3":
        format = "mp3"
        logger.warning("Streaming response only supports MP3 format.")

    fname = f"{str(uuid.uuid1())}.{format}"
    file_type = f"audio/{format}"
    task = {"text": text,
            "id": id,
            "format": format,
            "length": length,
            "noise": noise,
            "noisew": noisew,
            "max": max,
            "lang": lang,
            "speaker_lang": speaker_lang}

    if app.config.get("SAVE_AUDIO", False):
        logger.debug(f"[VITS] {fname}")

    if use_streaming:
        # Chunks are streamed back as they are generated.
        audio = tts.stream_vits_infer(task, fname)
        response = make_response(audio)
        response.headers['Content-Disposition'] = f'attachment; filename={fname}'
        response.headers['Content-Type'] = file_type
        return response
    else:
        t1 = time.time()
        audio = tts.vits_infer(task, fname)
        t2 = time.time()
        logger.info(f"[VITS] finish in {(t2 - t1):.2f}s")
        return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
160
+
161
+
162
@app.route('/voice/hubert-vits', methods=["POST"])
@require_api_key
def voice_hubert_api():
    """HuBert-VITS voice conversion endpoint.

    Expects a multipart POST with an ``upload`` audio file plus the target
    speaker ``id`` and synthesis parameters (format, length, noise, noisew,
    streaming); returns the converted audio as a file download or stream.
    """
    if request.method == "POST":
        try:
            voice = request.files['upload']
            id = int(request.form.get("id"))
            # Fix: the default previously fell back to the LANG config
            # ("auto"), which is not an audio format; use FORMAT/"wav".
            format = request.form.get("format", app.config.get("FORMAT", "wav"))
            length = float(request.form.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.form.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.form.get("noisew", app.config.get("NOISEW", 0.8)))
            use_streaming = request.form.get('streaming', False, type=bool)
        except Exception as e:
            logger.error(f"[hubert] {e}")
            return make_response("parameter error", 400)

        logger.info(f"[hubert] id:{id} format:{format} length:{length} noise:{noise} noisew:{noisew}")

        # Persist the upload under a random name; secure_filename sanitizes
        # the extension taken from the client-supplied filename.
        fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
        voice.save(os.path.join(app.config['UPLOAD_FOLDER'], fname))

        if check_is_none(id):
            logger.info(f"[hubert] speaker id is empty")
            return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

        if id < 0 or id >= tts.hubert_speakers_count:
            logger.info(f"[hubert] speaker id {id} does not exist")
            return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)

        file_type = f"audio/{format}"
        task = {"id": id,
                "format": format,
                "length": length,
                "noise": noise,
                "noisew": noisew,
                "audio_path": os.path.join(app.config['UPLOAD_FOLDER'], fname)}

        t1 = time.time()
        audio = tts.hubert_vits_infer(task, fname)
        t2 = time.time()
        if app.config.get("SAVE_AUDIO", False):
            logger.debug(f"[hubert] {fname}")
        logger.info(f"[hubert] finish in {(t2 - t1):.2f}s")
        if use_streaming:
            audio = tts.generate_audio_chunks(audio)
            response = make_response(audio)
            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
            response.headers['Content-Type'] = file_type
            return response
        else:
            return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
213
+
214
+
215
@app.route('/voice/w2v2-vits', methods=["GET", "POST"])
@require_api_key
def voice_w2v2_api():
    """W2V2-VITS (dimensional emotion) text-to-speech endpoint.

    Accepts the same parameters as /voice/vits plus ``emotion``, an index
    into the emotion-reference npy file. Returns the synthesized audio as
    a file download, or as a streamed MP3 when ``streaming`` is set.
    """
    try:
        if request.method == "GET":
            text = request.args.get("text", "")
            id = int(request.args.get("id", app.config.get("ID", 0)))
            format = request.args.get("format", app.config.get("FORMAT", "wav"))
            lang = request.args.get("lang", app.config.get("LANG", "auto"))
            length = float(request.args.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.args.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.args.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(request.args.get("max", app.config.get("MAX", 50)))
            emotion = int(request.args.get("emotion", app.config.get("EMOTION", 0)))
            use_streaming = request.args.get('streaming', False, type=bool)
        elif request.method == "POST":
            content_type = request.headers.get('Content-Type')
            if content_type == 'application/json':
                data = request.get_json()
            else:
                data = request.form
            text = data.get("text", "")
            id = int(data.get("id", app.config.get("ID", 0)))
            format = data.get("format", app.config.get("FORMAT", "wav"))
            lang = data.get("lang", app.config.get("LANG", "auto"))
            # Fix: "length" previously had no default here, so a POST without
            # it crashed with float(None); fall back to config like GET does.
            length = float(data.get("length", app.config.get("LENGTH", 1)))
            noise = float(data.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(data.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(data.get("max", app.config.get("MAX", 50)))
            emotion = int(data.get("emotion", app.config.get("EMOTION", 0)))
            use_streaming = request.form.get('streaming', False, type=bool)
    except Exception as e:
        logger.error(f"[w2v2] {e}")
        return make_response("parameter error", 400)

    logger.info(f"[w2v2] id:{id} format:{format} lang:{lang} "
                f"length:{length} noise:{noise} noisew:{noisew} emotion:{emotion}")
    logger.info(f"[w2v2] len:{len(text)} text:{text}")

    if check_is_none(text):
        logger.info(f"[w2v2] text is empty")
        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)

    if check_is_none(id):
        logger.info(f"[w2v2] speaker id is empty")
        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

    if id < 0 or id >= tts.w2v2_speakers_count:
        logger.info(f"[w2v2] speaker id {id} does not exist")
        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)

    # Reject languages the selected speaker's model does not support.
    speaker_lang = tts.voice_speakers["W2V2-VITS"][id].get('lang')
    if lang.upper() != "AUTO" and lang.upper() != "MIX" and len(speaker_lang) != 1 and lang not in speaker_lang:
        logger.info(f"[w2v2] lang \"{lang}\" is not in {speaker_lang}")
        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)

    # LANGUAGE_AUTOMATIC_DETECT (if configured) overrides the speaker's list.
    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")

    # Streaming responses are only produced as MP3.
    if use_streaming and format.upper() != "MP3":
        format = "mp3"
        logger.warning("Streaming response only supports MP3 format.")

    fname = f"{str(uuid.uuid1())}.{format}"
    file_type = f"audio/{format}"
    task = {"text": text,
            "id": id,
            "format": format,
            "length": length,
            "noise": noise,
            "noisew": noisew,
            "max": max,
            "lang": lang,
            "emotion": emotion,
            "speaker_lang": speaker_lang}

    t1 = time.time()
    audio = tts.w2v2_vits_infer(task, fname)
    t2 = time.time()
    if app.config.get("SAVE_AUDIO", False):
        logger.debug(f"[W2V2] {fname}")
    if use_streaming:
        audio = tts.generate_audio_chunks(audio)
        response = make_response(audio)
        response.headers['Content-Disposition'] = f'attachment; filename={fname}'
        response.headers['Content-Type'] = file_type
        return response
    else:
        logger.info(f"[w2v2] finish in {(t2 - t1):.2f}s")
        return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
307
+
308
+
309
@app.route('/voice/conversion', methods=["POST"])
@app.route('/voice/vits/conversion', methods=["POST"])
@require_api_key
def vits_voice_conversion_api():
    """Convert an uploaded recording from one VITS speaker's voice to another's.

    Multipart POST: ``upload`` (audio file), ``original_id`` (speaker used in
    the upload), ``target_id`` (speaker to convert to), optional ``format``
    and ``streaming``.
    """
    if request.method == "POST":
        try:
            voice = request.files['upload']
            original_id = int(request.form["original_id"])
            target_id = int(request.form["target_id"])
            # Default to the upload's own extension when no format is given.
            format = request.form.get("format", voice.filename.split(".")[1])
            use_streaming = request.form.get('streaming', False, type=bool)
        except Exception as e:
            # Fix: log tag typo "convertsion" -> "conversion".
            logger.error(f"[vits_voice_conversion] {e}")
            return make_response("parameter error", 400)

        # Fix: "orginal_id" -> "original_id" in the log message.
        logger.info(f"[vits_voice_conversion] original_id:{original_id} target_id:{target_id}")
        fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
        audio_path = os.path.join(app.config['UPLOAD_FOLDER'], fname)
        voice.save(audio_path)
        file_type = f"audio/{format}"
        task = {"audio_path": audio_path,
                "original_id": original_id,
                "target_id": target_id,
                "format": format}

        t1 = time.time()
        audio = tts.vits_voice_conversion(task, fname)
        t2 = time.time()
        if app.config.get("SAVE_AUDIO", False):
            logger.debug(f"[Voice conversion] {fname}")
        logger.info(f"[Voice conversion] finish in {(t2 - t1):.2f}s")
        if use_streaming:
            audio = tts.generate_audio_chunks(audio)
            response = make_response(audio)
            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
            response.headers['Content-Type'] = file_type
            return response
        else:
            return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
348
+
349
+
350
@app.route('/voice/ssml', methods=["POST"])
@require_api_key
def ssml():
    """Synthesize speech described by an SSML document.

    Reads the "ssml" field from a JSON or form POST body. The output audio
    format is chosen inside the SSML and reported back by the TTS layer.
    """
    try:
        content_type = request.headers.get('Content-Type')
        if content_type == 'application/json':
            data = request.get_json()
        else:
            data = request.form
        ssml = data.get("ssml")
        # Fix: the streaming flag used to be read as eval(ssml.get(...)),
        # which raised AttributeError (ssml is a string, not a dict). Read
        # it from the request data and use plain truthiness instead.
        use_streaming = data.get('streaming', False)
    except Exception as e:
        logger.info(f"[ssml] {e}")
        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)

    logger.debug(ssml)

    # Fix: fname/file_type were previously built before `format` was
    # assigned, so the f-string interpolated the *builtin* format()
    # function into the filename. Build the name from a uuid stem now and
    # attach the real extension once inference reports the format.
    fname_stem = str(uuid.uuid1())

    t1 = time.time()
    audio, format = tts.create_ssml_infer_task(ssml, fname_stem)
    t2 = time.time()

    fname = f"{fname_stem}.{format}"
    file_type = f"audio/{format}"
    if app.config.get("SAVE_AUDIO", False):
        logger.debug(f"[ssml] {fname}")
    logger.info(f"[ssml] finish in {(t2 - t1):.2f}s")

    if use_streaming:
        audio = tts.generate_audio_chunks(audio)
        response = make_response(audio)
        response.headers['Content-Disposition'] = f'attachment; filename={fname}'
        response.headers['Content-Type'] = file_type
        return response
    else:
        return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
384
+
385
+
386
@app.route('/voice/dimension-emotion', methods=["POST"])
def dimensional_emotion():
    """Extract a dimensional-emotion embedding (.npy) from an uploaded audio file."""
    if request.method == "POST":
        try:
            upload = request.files['upload']
            use_streaming = request.form.get('streaming', False, type=bool)
        except Exception as e:
            logger.error(f"[dimensional_emotion] {e}")
            return make_response("parameter error", 400)

        # Buffer the upload in memory; the TTS layer consumes a file-like object.
        content = BytesIO(upload.read())

        file_type = "application/octet-stream; charset=ascii"
        fname = os.path.splitext(upload.filename)[0] + ".npy"
        emotion_npy = tts.get_dimensional_emotion_npy(content)
        if use_streaming:
            emotion_npy = tts.generate_audio_chunks(emotion_npy)
            response = make_response(emotion_npy)
            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
            response.headers['Content-Type'] = file_type
            return response
        else:
            return send_file(path_or_file=emotion_npy, mimetype=file_type, download_name=fname)
409
+
410
+
411
@app.route('/voice/check', methods=["GET", "POST"])
def check():
    """Check whether a speaker id exists for a given model type.

    Parameters (query string, form or JSON):
        model: one of "VITS", "HUBERT", "W2V2" (case-insensitive).
        id:    integer speaker id to look up.
    Returns JSON with the speaker's name and language list on success.
    """
    try:
        if request.method == "GET":
            model = request.args.get("model")
            id = int(request.args.get("id"))
        elif request.method == "POST":
            content_type = request.headers.get('Content-Type')
            if content_type == 'application/json':
                data = request.get_json()
            else:
                data = request.form
            model = data.get("model")
            id = int(data.get("id"))
    except Exception as e:
        logger.info(f"[check] {e}")
        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)

    if check_is_none(model):
        logger.info(f"[check] model {model} is empty")
        return make_response(jsonify({"status": "error", "message": "model is empty"}), 400)

    if model.upper() not in ("VITS", "HUBERT", "W2V2"):
        # Fix: the original logged a misleading "speaker id ... error" here;
        # the failure is an unknown model name. Also set the 404 status
        # directly instead of mutating the response afterwards.
        logger.info(f"[check] model {model} does not exist")
        return make_response(jsonify({"status": "error", "message": f"model {model} does not exist"}), 404)

    if check_is_none(id):
        logger.info(f"[check] speaker id is empty")
        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

    # Map the short model name onto the speaker catalogue key.
    if model.upper() == "VITS":
        speaker_list = tts.voice_speakers["VITS"]
    elif model.upper() == "HUBERT":
        speaker_list = tts.voice_speakers["HUBERT-VITS"]
    elif model.upper() == "W2V2":
        speaker_list = tts.voice_speakers["W2V2-VITS"]

    if len(speaker_list) == 0:
        logger.info(f"[check] {model} not loaded")
        return make_response(jsonify({"status": "error", "message": f"{model} not loaded"}), 400)

    if id < 0 or id >= len(speaker_list):
        logger.info(f"[check] speaker id {id} does not exist")
        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
    name = str(speaker_list[id]["name"])
    lang = speaker_list[id]["lang"]
    logger.info(f"[check] check id:{id} name:{name} lang:{lang}")

    return make_response(jsonify({"status": "success", "id": id, "name": name, "lang": lang}), 200)
462
+
463
+
464
# Periodic cleanup of generated files.
@scheduler.task('interval', id='clean_task', seconds=app.config.get("CLEAN_INTERVAL_SECONDS", 3600),
                misfire_grace_time=900)
def clean_task():
    """Purge uploaded and cached audio files on the configured interval."""
    for folder_key in ("UPLOAD_FOLDER", "CACHE_PATH"):
        clean_folder(app.config[folder_key])
470
+
471
+
472
if __name__ == '__main__':
    # Bind to all interfaces so the API is reachable from outside the host.
    app.run(host='0.0.0.0', port=app.config.get("PORT", 23456), debug=app.config.get("DEBUG", False))
    # app.run(host='127.0.0.1', port=app.config.get("PORT",23456), debug=True)  # local run / debugging
attentions.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ import commons
7
+ from modules import LayerNorm
8
+
9
+
10
class Encoder(nn.Module):
    """Transformer encoder stack: ``n_layers`` blocks of multi-head
    self-attention followed by a feed-forward network, each sub-layer wrapped
    with dropout, a residual connection and post-layer-norm.

    ``window_size`` is forwarded to MultiHeadAttention (attention window;
    exact semantics are defined there).
    """

    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        # One attention + FFN pair (each with its own LayerNorm) per layer.
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        """Encode ``x`` under padding mask ``x_mask``; the output is re-masked
        after every layer and at the end.

        # NOTE(review): assumes channel-first tensors (batch, channels, time)
        # with x_mask broadcastable over x — confirm against callers.
        """
        # Pairwise mask: position pair (i, j) is valid only if both are valid.
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            # Self-attention sub-layer: residual + dropout + post-norm.
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            # Feed-forward sub-layer: residual + dropout + post-norm.
            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x
45
+
46
+
47
class Decoder(nn.Module):
    """Transformer decoder stack: each of ``n_layers`` blocks applies masked
    self-attention, encoder-decoder cross-attention, and a causal FFN, every
    sub-layer wrapped with dropout, a residual connection and post-layer-norm.

    ``proximal_bias``/``proximal_init`` are forwarded to the self-attention
    layers (semantics defined in MultiHeadAttention).
    """

    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        # Per layer: self-attention, cross-attention, FFN — each followed by
        # its own LayerNorm (norm_layers_0/1/2 respectively).
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            # causal=True keeps the FFN's convolution from looking ahead.
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        # Causal mask built from the sequence length: forbids attending to
        # future positions (commons.subsequent_mask).
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        # Cross-attention mask: a decoder position may attend to an encoder
        # position only where both padding masks are valid.
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            # Masked self-attention: residual + dropout + post-norm.
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            # Encoder-decoder cross-attention: residual + dropout + post-norm.
            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            # Causal feed-forward: residual + dropout + post-norm.
            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x
96
+
97
+
98
+ class MultiHeadAttention(nn.Module):
99
+ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
100
+ super().__init__()
101
+ assert channels % n_heads == 0
102
+
103
+ self.channels = channels
104
+ self.out_channels = out_channels
105
+ self.n_heads = n_heads
106
+ self.p_dropout = p_dropout
107
+ self.window_size = window_size
108
+ self.heads_share = heads_share
109
+ self.block_length = block_length
110
+ self.proximal_bias = proximal_bias
111
+ self.proximal_init = proximal_init
112
+ self.attn = None
113
+
114
+ self.k_channels = channels // n_heads
115
+ self.conv_q = nn.Conv1d(channels, channels, 1)
116
+ self.conv_k = nn.Conv1d(channels, channels, 1)
117
+ self.conv_v = nn.Conv1d(channels, channels, 1)
118
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
119
+ self.drop = nn.Dropout(p_dropout)
120
+
121
+ if window_size is not None:
122
+ n_heads_rel = 1 if heads_share else n_heads
123
+ rel_stddev = self.k_channels**-0.5
124
+ self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
125
+ self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
126
+
127
+ nn.init.xavier_uniform_(self.conv_q.weight)
128
+ nn.init.xavier_uniform_(self.conv_k.weight)
129
+ nn.init.xavier_uniform_(self.conv_v.weight)
130
+ if proximal_init:
131
+ with torch.no_grad():
132
+ self.conv_k.weight.copy_(self.conv_q.weight)
133
+ self.conv_k.bias.copy_(self.conv_q.bias)
134
+
135
+ def forward(self, x, c, attn_mask=None):
136
+ q = self.conv_q(x)
137
+ k = self.conv_k(c)
138
+ v = self.conv_v(c)
139
+
140
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
141
+
142
+ x = self.conv_o(x)
143
+ return x
144
+
145
+ def attention(self, query, key, value, mask=None):
146
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
147
+ b, d, t_s, t_t = (*key.size(), query.size(2))
148
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
149
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
150
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
151
+
152
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
153
+ if self.window_size is not None:
154
+ assert t_s == t_t, "Relative attention is only available for self-attention."
155
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
156
+ rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
157
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
158
+ scores = scores + scores_local
159
+ if self.proximal_bias:
160
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
161
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
162
+ if mask is not None:
163
+ scores = scores.masked_fill(mask == 0, -1e4)
164
+ if self.block_length is not None:
165
+ assert t_s == t_t, "Local attention is only available for self-attention."
166
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
167
+ scores = scores.masked_fill(block_mask == 0, -1e4)
168
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
169
+ p_attn = self.drop(p_attn)
170
+ output = torch.matmul(p_attn, value)
171
+ if self.window_size is not None:
172
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
173
+ value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
174
+ output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
175
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
176
+ return output, p_attn
177
+
178
+ def _matmul_with_relative_values(self, x, y):
179
+ """
180
+ x: [b, h, l, m]
181
+ y: [h or 1, m, d]
182
+ ret: [b, h, l, d]
183
+ """
184
+ ret = torch.matmul(x, y.unsqueeze(0))
185
+ return ret
186
+
187
+ def _matmul_with_relative_keys(self, x, y):
188
+ """
189
+ x: [b, h, l, d]
190
+ y: [h or 1, m, d]
191
+ ret: [b, h, l, m]
192
+ """
193
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
194
+ return ret
195
+
196
+ def _get_relative_embeddings(self, relative_embeddings, length):
197
+ max_relative_position = 2 * self.window_size + 1
198
+ # Pad first before slice to avoid using cond ops.
199
+ pad_length = max(length - (self.window_size + 1), 0)
200
+ slice_start_position = max((self.window_size + 1) - length, 0)
201
+ slice_end_position = slice_start_position + 2 * length - 1
202
+ if pad_length > 0:
203
+ padded_relative_embeddings = F.pad(
204
+ relative_embeddings,
205
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
206
+ else:
207
+ padded_relative_embeddings = relative_embeddings
208
+ used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
209
+ return used_relative_embeddings
210
+
211
+ def _relative_position_to_absolute_position(self, x):
212
+ """
213
+ x: [b, h, l, 2*l-1]
214
+ ret: [b, h, l, l]
215
+ """
216
+ batch, heads, length, _ = x.size()
217
+ # Concat columns of pad to shift from relative to absolute indexing.
218
+ x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
219
+
220
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
221
+ x_flat = x.view([batch, heads, length * 2 * length])
222
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
223
+
224
+ # Reshape and slice out the padded elements.
225
+ x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
226
+ return x_final
227
+
228
+ def _absolute_position_to_relative_position(self, x):
229
+ """
230
+ x: [b, h, l, l]
231
+ ret: [b, h, l, 2*l-1]
232
+ """
233
+ batch, heads, length, _ = x.size()
234
+ # padd along column
235
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
236
+ x_flat = x.view([batch, heads, length**2 + length*(length -1)])
237
+ # add 0's in the beginning that will skew the elements after reshape
238
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
239
+ x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
240
+ return x_final
241
+
242
+ def _attention_bias_proximal(self, length):
243
+ """Bias for self-attention to encourage attention to close positions.
244
+ Args:
245
+ length: an integer scalar.
246
+ Returns:
247
+ a Tensor with shape [1, 1, length, length]
248
+ """
249
+ r = torch.arange(length, dtype=torch.float32)
250
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
251
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
252
+
253
+
254
class FFN(nn.Module):
    """Position-wise feed-forward network built from two 1-D convolutions.

    Supports causal (left-only) or same padding, and an optional GELU-like
    activation ("gelu" uses the sigmoid approximation x * sigmoid(1.702 x)).
    """

    def __init__(self, in_channels, out_channels, filter_channels, kernel_size,
                 p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        # Pick the padding strategy once at construction time.
        self.padding = self._causal_padding if causal else self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        # Pad only on the left so outputs never depend on future frames.
        if self.kernel_size == 1:
            return x
        pad_shape = [[0, 0], [0, 0], [self.kernel_size - 1, 0]]
        return F.pad(x, commons.convert_pad_shape(pad_shape))

    def _same_padding(self, x):
        # Symmetric padding keeping output length equal to input length.
        if self.kernel_size == 1:
            return x
        pad_shape = [[0, 0], [0, 0], [(self.kernel_size - 1) // 2, self.kernel_size // 2]]
        return F.pad(x, commons.convert_pad_shape(pad_shape))
bert/ProsodyModel.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from transformers import BertModel, BertConfig, BertTokenizer
7
+
8
+
9
class CharEmbedding(nn.Module):
    """Wraps a BERT encoder and projects its hidden states down to 256-d
    per-character embeddings for prosody prediction.

    ``model_dir`` must contain the tokenizer vocab and a BERT config.
    """

    def __init__(self, model_dir):
        super().__init__()
        self.tokenizer = BertTokenizer.from_pretrained(model_dir)
        self.bert_config = BertConfig.from_pretrained(model_dir)
        self.hidden_size = self.bert_config.hidden_size
        # Built from config only; weights are loaded separately by the caller.
        self.bert = BertModel(self.bert_config)
        self.proj = nn.Linear(self.hidden_size, 256)
        self.linear = nn.Linear(256, 3)

    def text2Token(self, text):
        """Tokenize ``text`` and return the corresponding vocabulary ids."""
        tokens = self.tokenizer.tokenize(text)
        return self.tokenizer.convert_tokens_to_ids(tokens)

    def forward(self, inputs_ids, inputs_masks, tokens_type_ids):
        """Return the 256-d projection of BERT's sequence output."""
        hidden = self.bert(input_ids=inputs_ids,
                           attention_mask=inputs_masks,
                           token_type_ids=tokens_type_ids)[0]
        return self.proj(hidden)
+
31
+
32
class TTSProsody(object):
    """Loads a pretrained prosody BERT and exposes character embeddings."""

    def __init__(self, path, device):
        self.device = device
        self.char_model = CharEmbedding(path)
        # Load weights on CPU first, then move the model to `device`.
        # strict=False: checkpoint may omit heads not used at inference.
        state = torch.load(os.path.join(path, 'prosody_model.pt'), map_location="cpu")
        self.char_model.load_state_dict(state, strict=False)
        self.char_model.eval()
        self.char_model.to(self.device)

    def get_char_embeds(self, text):
        """Return per-character embeddings for ``text`` as a CPU tensor."""
        input_ids = self.char_model.text2Token(text)
        attention_mask = [1] * len(input_ids)
        token_type_ids = [0] * len(input_ids)
        input_ids = torch.LongTensor([input_ids]).to(self.device)
        attention_mask = torch.LongTensor([attention_mask]).to(self.device)
        token_type_ids = torch.LongTensor([token_type_ids]).to(self.device)

        with torch.no_grad():
            char_embeds = self.char_model(
                input_ids, attention_mask, token_type_ids).squeeze(0).cpu()
        return char_embeds

    def expand_for_phone(self, char_embeds, length):  # length of phones for char
        """Repeat each character embedding by its phone count.

        ``length`` holds, per character, the number of phones it maps to;
        returns a numpy array with sum(length) rows.
        """
        assert char_embeds.size(0) == len(length)
        expanded = [vec.expand(count, -1) for vec, count in zip(char_embeds, length)]
        result = torch.cat(expanded, 0)
        assert result.size(0) == sum(length)
        return result.numpy()
+
69
+
70
if __name__ == "__main__":
    # Interactive smoke test: embed whatever the user types, forever.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    prosody = TTSProsody('./bert/', device)
    while True:
        text = input("请输入文本:")
        prosody.get_char_embeds(text)
bert/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """ from https://github.com/PlayVoice/vits_chinese """
2
+ from .ProsodyModel import TTSProsody
bert/config.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "attention_probs_dropout_prob": 0.1,
3
+ "directionality": "bidi",
4
+ "hidden_act": "gelu",
5
+ "hidden_dropout_prob": 0.1,
6
+ "hidden_size": 768,
7
+ "initializer_range": 0.02,
8
+ "intermediate_size": 3072,
9
+ "max_position_embeddings": 512,
10
+ "num_attention_heads": 12,
11
+ "num_hidden_layers": 12,
12
+ "pooler_fc_size": 768,
13
+ "pooler_num_attention_heads": 12,
14
+ "pooler_num_fc_layers": 3,
15
+ "pooler_size_per_head": 128,
16
+ "pooler_type": "first_token_transform",
17
+ "type_vocab_size": 2,
18
+ "vocab_size": 21128
19
+ }
bert/prosody_model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3accec7a0d5cbfccaa8a42b96374a91d442a69801c6a01402baae3bf06b8c015
3
+ size 409941419
bert/prosody_tool.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def is_chinese(uchar):
    """Return True when ``uchar`` lies in the CJK Unified Ideographs
    range U+4E00..U+9FA5, False otherwise."""
    return u'\u4e00' <= uchar <= u'\u9fa5'
6
+
7
+
8
+ pinyin_dict = {
9
+ "a": ("^", "a"),
10
+ "ai": ("^", "ai"),
11
+ "an": ("^", "an"),
12
+ "ang": ("^", "ang"),
13
+ "ao": ("^", "ao"),
14
+ "ba": ("b", "a"),
15
+ "bai": ("b", "ai"),
16
+ "ban": ("b", "an"),
17
+ "bang": ("b", "ang"),
18
+ "bao": ("b", "ao"),
19
+ "be": ("b", "e"),
20
+ "bei": ("b", "ei"),
21
+ "ben": ("b", "en"),
22
+ "beng": ("b", "eng"),
23
+ "bi": ("b", "i"),
24
+ "bian": ("b", "ian"),
25
+ "biao": ("b", "iao"),
26
+ "bie": ("b", "ie"),
27
+ "bin": ("b", "in"),
28
+ "bing": ("b", "ing"),
29
+ "bo": ("b", "o"),
30
+ "bu": ("b", "u"),
31
+ "ca": ("c", "a"),
32
+ "cai": ("c", "ai"),
33
+ "can": ("c", "an"),
34
+ "cang": ("c", "ang"),
35
+ "cao": ("c", "ao"),
36
+ "ce": ("c", "e"),
37
+ "cen": ("c", "en"),
38
+ "ceng": ("c", "eng"),
39
+ "cha": ("ch", "a"),
40
+ "chai": ("ch", "ai"),
41
+ "chan": ("ch", "an"),
42
+ "chang": ("ch", "ang"),
43
+ "chao": ("ch", "ao"),
44
+ "che": ("ch", "e"),
45
+ "chen": ("ch", "en"),
46
+ "cheng": ("ch", "eng"),
47
+ "chi": ("ch", "iii"),
48
+ "chong": ("ch", "ong"),
49
+ "chou": ("ch", "ou"),
50
+ "chu": ("ch", "u"),
51
+ "chua": ("ch", "ua"),
52
+ "chuai": ("ch", "uai"),
53
+ "chuan": ("ch", "uan"),
54
+ "chuang": ("ch", "uang"),
55
+ "chui": ("ch", "uei"),
56
+ "chun": ("ch", "uen"),
57
+ "chuo": ("ch", "uo"),
58
+ "ci": ("c", "ii"),
59
+ "cong": ("c", "ong"),
60
+ "cou": ("c", "ou"),
61
+ "cu": ("c", "u"),
62
+ "cuan": ("c", "uan"),
63
+ "cui": ("c", "uei"),
64
+ "cun": ("c", "uen"),
65
+ "cuo": ("c", "uo"),
66
+ "da": ("d", "a"),
67
+ "dai": ("d", "ai"),
68
+ "dan": ("d", "an"),
69
+ "dang": ("d", "ang"),
70
+ "dao": ("d", "ao"),
71
+ "de": ("d", "e"),
72
+ "dei": ("d", "ei"),
73
+ "den": ("d", "en"),
74
+ "deng": ("d", "eng"),
75
+ "di": ("d", "i"),
76
+ "dia": ("d", "ia"),
77
+ "dian": ("d", "ian"),
78
+ "diao": ("d", "iao"),
79
+ "die": ("d", "ie"),
80
+ "ding": ("d", "ing"),
81
+ "diu": ("d", "iou"),
82
+ "dong": ("d", "ong"),
83
+ "dou": ("d", "ou"),
84
+ "du": ("d", "u"),
85
+ "duan": ("d", "uan"),
86
+ "dui": ("d", "uei"),
87
+ "dun": ("d", "uen"),
88
+ "duo": ("d", "uo"),
89
+ "e": ("^", "e"),
90
+ "ei": ("^", "ei"),
91
+ "en": ("^", "en"),
92
+ "ng": ("^", "en"),
93
+ "eng": ("^", "eng"),
94
+ "er": ("^", "er"),
95
+ "fa": ("f", "a"),
96
+ "fan": ("f", "an"),
97
+ "fang": ("f", "ang"),
98
+ "fei": ("f", "ei"),
99
+ "fen": ("f", "en"),
100
+ "feng": ("f", "eng"),
101
+ "fo": ("f", "o"),
102
+ "fou": ("f", "ou"),
103
+ "fu": ("f", "u"),
104
+ "ga": ("g", "a"),
105
+ "gai": ("g", "ai"),
106
+ "gan": ("g", "an"),
107
+ "gang": ("g", "ang"),
108
+ "gao": ("g", "ao"),
109
+ "ge": ("g", "e"),
110
+ "gei": ("g", "ei"),
111
+ "gen": ("g", "en"),
112
+ "geng": ("g", "eng"),
113
+ "gong": ("g", "ong"),
114
+ "gou": ("g", "ou"),
115
+ "gu": ("g", "u"),
116
+ "gua": ("g", "ua"),
117
+ "guai": ("g", "uai"),
118
+ "guan": ("g", "uan"),
119
+ "guang": ("g", "uang"),
120
+ "gui": ("g", "uei"),
121
+ "gun": ("g", "uen"),
122
+ "guo": ("g", "uo"),
123
+ "ha": ("h", "a"),
124
+ "hai": ("h", "ai"),
125
+ "han": ("h", "an"),
126
+ "hang": ("h", "ang"),
127
+ "hao": ("h", "ao"),
128
+ "he": ("h", "e"),
129
+ "hei": ("h", "ei"),
130
+ "hen": ("h", "en"),
131
+ "heng": ("h", "eng"),
132
+ "hong": ("h", "ong"),
133
+ "hou": ("h", "ou"),
134
+ "hu": ("h", "u"),
135
+ "hua": ("h", "ua"),
136
+ "huai": ("h", "uai"),
137
+ "huan": ("h", "uan"),
138
+ "huang": ("h", "uang"),
139
+ "hui": ("h", "uei"),
140
+ "hun": ("h", "uen"),
141
+ "huo": ("h", "uo"),
142
+ "ji": ("j", "i"),
143
+ "jia": ("j", "ia"),
144
+ "jian": ("j", "ian"),
145
+ "jiang": ("j", "iang"),
146
+ "jiao": ("j", "iao"),
147
+ "jie": ("j", "ie"),
148
+ "jin": ("j", "in"),
149
+ "jing": ("j", "ing"),
150
+ "jiong": ("j", "iong"),
151
+ "jiu": ("j", "iou"),
152
+ "ju": ("j", "v"),
153
+ "juan": ("j", "van"),
154
+ "jue": ("j", "ve"),
155
+ "jun": ("j", "vn"),
156
+ "ka": ("k", "a"),
157
+ "kai": ("k", "ai"),
158
+ "kan": ("k", "an"),
159
+ "kang": ("k", "ang"),
160
+ "kao": ("k", "ao"),
161
+ "ke": ("k", "e"),
162
+ "kei": ("k", "ei"),
163
+ "ken": ("k", "en"),
164
+ "keng": ("k", "eng"),
165
+ "kong": ("k", "ong"),
166
+ "kou": ("k", "ou"),
167
+ "ku": ("k", "u"),
168
+ "kua": ("k", "ua"),
169
+ "kuai": ("k", "uai"),
170
+ "kuan": ("k", "uan"),
171
+ "kuang": ("k", "uang"),
172
+ "kui": ("k", "uei"),
173
+ "kun": ("k", "uen"),
174
+ "kuo": ("k", "uo"),
175
+ "la": ("l", "a"),
176
+ "lai": ("l", "ai"),
177
+ "lan": ("l", "an"),
178
+ "lang": ("l", "ang"),
179
+ "lao": ("l", "ao"),
180
+ "le": ("l", "e"),
181
+ "lei": ("l", "ei"),
182
+ "leng": ("l", "eng"),
183
+ "li": ("l", "i"),
184
+ "lia": ("l", "ia"),
185
+ "lian": ("l", "ian"),
186
+ "liang": ("l", "iang"),
187
+ "liao": ("l", "iao"),
188
+ "lie": ("l", "ie"),
189
+ "lin": ("l", "in"),
190
+ "ling": ("l", "ing"),
191
+ "liu": ("l", "iou"),
192
+ "lo": ("l", "o"),
193
+ "long": ("l", "ong"),
194
+ "lou": ("l", "ou"),
195
+ "lu": ("l", "u"),
196
+ "lv": ("l", "v"),
197
+ "luan": ("l", "uan"),
198
+ "lve": ("l", "ve"),
199
+ "lue": ("l", "ve"),
200
+ "lun": ("l", "uen"),
201
+ "luo": ("l", "uo"),
202
+ "ma": ("m", "a"),
203
+ "mai": ("m", "ai"),
204
+ "man": ("m", "an"),
205
+ "mang": ("m", "ang"),
206
+ "mao": ("m", "ao"),
207
+ "me": ("m", "e"),
208
+ "mei": ("m", "ei"),
209
+ "men": ("m", "en"),
210
+ "meng": ("m", "eng"),
211
+ "mi": ("m", "i"),
212
+ "mian": ("m", "ian"),
213
+ "miao": ("m", "iao"),
214
+ "mie": ("m", "ie"),
215
+ "min": ("m", "in"),
216
+ "ming": ("m", "ing"),
217
+ "miu": ("m", "iou"),
218
+ "mo": ("m", "o"),
219
+ "mou": ("m", "ou"),
220
+ "mu": ("m", "u"),
221
+ "na": ("n", "a"),
222
+ "nai": ("n", "ai"),
223
+ "nan": ("n", "an"),
224
+ "nang": ("n", "ang"),
225
+ "nao": ("n", "ao"),
226
+ "ne": ("n", "e"),
227
+ "nei": ("n", "ei"),
228
+ "nen": ("n", "en"),
229
+ "neng": ("n", "eng"),
230
+ "ni": ("n", "i"),
231
+ "nia": ("n", "ia"),
232
+ "nian": ("n", "ian"),
233
+ "niang": ("n", "iang"),
234
+ "niao": ("n", "iao"),
235
+ "nie": ("n", "ie"),
236
+ "nin": ("n", "in"),
237
+ "ning": ("n", "ing"),
238
+ "niu": ("n", "iou"),
239
+ "nong": ("n", "ong"),
240
+ "nou": ("n", "ou"),
241
+ "nu": ("n", "u"),
242
+ "nv": ("n", "v"),
243
+ "nuan": ("n", "uan"),
244
+ "nve": ("n", "ve"),
245
+ "nue": ("n", "ve"),
246
+ "nuo": ("n", "uo"),
247
+ "o": ("^", "o"),
248
+ "ou": ("^", "ou"),
249
+ "pa": ("p", "a"),
250
+ "pai": ("p", "ai"),
251
+ "pan": ("p", "an"),
252
+ "pang": ("p", "ang"),
253
+ "pao": ("p", "ao"),
254
+ "pe": ("p", "e"),
255
+ "pei": ("p", "ei"),
256
+ "pen": ("p", "en"),
257
+ "peng": ("p", "eng"),
258
+ "pi": ("p", "i"),
259
+ "pian": ("p", "ian"),
260
+ "piao": ("p", "iao"),
261
+ "pie": ("p", "ie"),
262
+ "pin": ("p", "in"),
263
+ "ping": ("p", "ing"),
264
+ "po": ("p", "o"),
265
+ "pou": ("p", "ou"),
266
+ "pu": ("p", "u"),
267
+ "qi": ("q", "i"),
268
+ "qia": ("q", "ia"),
269
+ "qian": ("q", "ian"),
270
+ "qiang": ("q", "iang"),
271
+ "qiao": ("q", "iao"),
272
+ "qie": ("q", "ie"),
273
+ "qin": ("q", "in"),
274
+ "qing": ("q", "ing"),
275
+ "qiong": ("q", "iong"),
276
+ "qiu": ("q", "iou"),
277
+ "qu": ("q", "v"),
278
+ "quan": ("q", "van"),
279
+ "que": ("q", "ve"),
280
+ "qun": ("q", "vn"),
281
+ "ran": ("r", "an"),
282
+ "rang": ("r", "ang"),
283
+ "rao": ("r", "ao"),
284
+ "re": ("r", "e"),
285
+ "ren": ("r", "en"),
286
+ "reng": ("r", "eng"),
287
+ "ri": ("r", "iii"),
288
+ "rong": ("r", "ong"),
289
+ "rou": ("r", "ou"),
290
+ "ru": ("r", "u"),
291
+ "rua": ("r", "ua"),
292
+ "ruan": ("r", "uan"),
293
+ "rui": ("r", "uei"),
294
+ "run": ("r", "uen"),
295
+ "ruo": ("r", "uo"),
296
+ "sa": ("s", "a"),
297
+ "sai": ("s", "ai"),
298
+ "san": ("s", "an"),
299
+ "sang": ("s", "ang"),
300
+ "sao": ("s", "ao"),
301
+ "se": ("s", "e"),
302
+ "sen": ("s", "en"),
303
+ "seng": ("s", "eng"),
304
+ "sha": ("sh", "a"),
305
+ "shai": ("sh", "ai"),
306
+ "shan": ("sh", "an"),
307
+ "shang": ("sh", "ang"),
308
+ "shao": ("sh", "ao"),
309
+ "she": ("sh", "e"),
310
+ "shei": ("sh", "ei"),
311
+ "shen": ("sh", "en"),
312
+ "sheng": ("sh", "eng"),
313
+ "shi": ("sh", "iii"),
314
+ "shou": ("sh", "ou"),
315
+ "shu": ("sh", "u"),
316
+ "shua": ("sh", "ua"),
317
+ "shuai": ("sh", "uai"),
318
+ "shuan": ("sh", "uan"),
319
+ "shuang": ("sh", "uang"),
320
+ "shui": ("sh", "uei"),
321
+ "shun": ("sh", "uen"),
322
+ "shuo": ("sh", "uo"),
323
+ "si": ("s", "ii"),
324
+ "song": ("s", "ong"),
325
+ "sou": ("s", "ou"),
326
+ "su": ("s", "u"),
327
+ "suan": ("s", "uan"),
328
+ "sui": ("s", "uei"),
329
+ "sun": ("s", "uen"),
330
+ "suo": ("s", "uo"),
331
+ "ta": ("t", "a"),
332
+ "tai": ("t", "ai"),
333
+ "tan": ("t", "an"),
334
+ "tang": ("t", "ang"),
335
+ "tao": ("t", "ao"),
336
+ "te": ("t", "e"),
337
+ "tei": ("t", "ei"),
338
+ "teng": ("t", "eng"),
339
+ "ti": ("t", "i"),
340
+ "tian": ("t", "ian"),
341
+ "tiao": ("t", "iao"),
342
+ "tie": ("t", "ie"),
343
+ "ting": ("t", "ing"),
344
+ "tong": ("t", "ong"),
345
+ "tou": ("t", "ou"),
346
+ "tu": ("t", "u"),
347
+ "tuan": ("t", "uan"),
348
+ "tui": ("t", "uei"),
349
+ "tun": ("t", "uen"),
350
+ "tuo": ("t", "uo"),
351
+ "wa": ("^", "ua"),
352
+ "wai": ("^", "uai"),
353
+ "wan": ("^", "uan"),
354
+ "wang": ("^", "uang"),
355
+ "wei": ("^", "uei"),
356
+ "wen": ("^", "uen"),
357
+ "weng": ("^", "ueng"),
358
+ "wo": ("^", "uo"),
359
+ "wu": ("^", "u"),
360
+ "xi": ("x", "i"),
361
+ "xia": ("x", "ia"),
362
+ "xian": ("x", "ian"),
363
+ "xiang": ("x", "iang"),
364
+ "xiao": ("x", "iao"),
365
+ "xie": ("x", "ie"),
366
+ "xin": ("x", "in"),
367
+ "xing": ("x", "ing"),
368
+ "xiong": ("x", "iong"),
369
+ "xiu": ("x", "iou"),
370
+ "xu": ("x", "v"),
371
+ "xuan": ("x", "van"),
372
+ "xue": ("x", "ve"),
373
+ "xun": ("x", "vn"),
374
+ "ya": ("^", "ia"),
375
+ "yan": ("^", "ian"),
376
+ "yang": ("^", "iang"),
377
+ "yao": ("^", "iao"),
378
+ "ye": ("^", "ie"),
379
+ "yi": ("^", "i"),
380
+ "yin": ("^", "in"),
381
+ "ying": ("^", "ing"),
382
+ "yo": ("^", "iou"),
383
+ "yong": ("^", "iong"),
384
+ "you": ("^", "iou"),
385
+ "yu": ("^", "v"),
386
+ "yuan": ("^", "van"),
387
+ "yue": ("^", "ve"),
388
+ "yun": ("^", "vn"),
389
+ "za": ("z", "a"),
390
+ "zai": ("z", "ai"),
391
+ "zan": ("z", "an"),
392
+ "zang": ("z", "ang"),
393
+ "zao": ("z", "ao"),
394
+ "ze": ("z", "e"),
395
+ "zei": ("z", "ei"),
396
+ "zen": ("z", "en"),
397
+ "zeng": ("z", "eng"),
398
+ "zha": ("zh", "a"),
399
+ "zhai": ("zh", "ai"),
400
+ "zhan": ("zh", "an"),
401
+ "zhang": ("zh", "ang"),
402
+ "zhao": ("zh", "ao"),
403
+ "zhe": ("zh", "e"),
404
+ "zhei": ("zh", "ei"),
405
+ "zhen": ("zh", "en"),
406
+ "zheng": ("zh", "eng"),
407
+ "zhi": ("zh", "iii"),
408
+ "zhong": ("zh", "ong"),
409
+ "zhou": ("zh", "ou"),
410
+ "zhu": ("zh", "u"),
411
+ "zhua": ("zh", "ua"),
412
+ "zhuai": ("zh", "uai"),
413
+ "zhuan": ("zh", "uan"),
414
+ "zhuang": ("zh", "uang"),
415
+ "zhui": ("zh", "uei"),
416
+ "zhun": ("zh", "uen"),
417
+ "zhuo": ("zh", "uo"),
418
+ "zi": ("z", "ii"),
419
+ "zong": ("z", "ong"),
420
+ "zou": ("z", "ou"),
421
+ "zu": ("z", "u"),
422
+ "zuan": ("z", "uan"),
423
+ "zui": ("z", "uei"),
424
+ "zun": ("z", "uen"),
425
+ "zuo": ("z", "uo"),
426
+ }
bert/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
chinese_dialect_lexicons/changzhou.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Changzhou dialect to IPA",
3
+ "segmentation": {
4
+ "type": "mmseg",
5
+ "dict": {
6
+ "type": "ocd2",
7
+ "file": "changzhou.ocd2"
8
+ }
9
+ },
10
+ "conversion_chain": [
11
+ {
12
+ "dict": {
13
+ "type": "group",
14
+ "dicts": [
15
+ {
16
+ "type": "ocd2",
17
+ "file": "changzhou.ocd2"
18
+ }
19
+ ]
20
+ }
21
+ }
22
+ ]
23
+ }
chinese_dialect_lexicons/changzhou.ocd2 ADDED
Binary file (96.1 kB). View file
 
chinese_dialect_lexicons/changzhou_3.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Changzhou dialect to IPA",
3
+ "segmentation": {
4
+ "type": "mmseg",
5
+ "dict": {
6
+ "type": "ocd2",
7
+ "file": "changzhou.ocd2"
8
+ }
9
+ },
10
+ "conversion_chain": [
11
+ {
12
+ "dict": {
13
+ "type": "group",
14
+ "dicts": [
15
+ {
16
+ "type": "ocd2",
17
+ "file": "changzhou.ocd2"
18
+ }
19
+ ]
20
+ }
21
+ }
22
+ ]
23
+ }
chinese_dialect_lexicons/changzhou_3.ocd2 ADDED
Binary file (96.1 kB). View file
 
chinese_dialect_lexicons/cixi_2.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Cixi dialect to IPA",
3
+ "segmentation": {
4
+ "type": "mmseg",
5
+ "dict": {
6
+ "type": "ocd2",
7
+ "file": "cixi.ocd2"
8
+ }
9
+ },
10
+ "conversion_chain": [
11
+ {
12
+ "dict": {
13
+ "type": "group",
14
+ "dicts": [
15
+ {
16
+ "type": "ocd2",
17
+ "file": "cixi.ocd2"
18
+ }
19
+ ]
20
+ }
21
+ }
22
+ ]
23
+ }
chinese_dialect_lexicons/cixi_2.ocd2 ADDED
Binary file (98 kB). View file
 
chinese_dialect_lexicons/fuyang_2.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Fuyang dialect to IPA",
3
+ "segmentation": {
4
+ "type": "mmseg",
5
+ "dict": {
6
+ "type": "ocd2",
7
+ "file": "fuyang.ocd2"
8
+ }
9
+ },
10
+ "conversion_chain": [
11
+ {
12
+ "dict": {
13
+ "type": "group",
14
+ "dicts": [
15
+ {
16
+ "type": "ocd2",
17
+ "file": "fuyang.ocd2"
18
+ }
19
+ ]
20
+ }
21
+ }
22
+ ]
23
+ }
chinese_dialect_lexicons/fuyang_2.ocd2 ADDED
Binary file (83.7 kB). View file
 
chinese_dialect_lexicons/hangzhou_2.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Hangzhounese to IPA",
3
+ "segmentation": {
4
+ "type": "mmseg",
5
+ "dict": {
6
+ "type": "ocd2",
7
+ "file": "hangzhou.ocd2"
8
+ }
9
+ },
10
+ "conversion_chain": [{
11
+ "dict": {
12
+ "type": "group",
13
+ "dicts": [{
14
+ "type": "ocd2",
15
+ "file": "hangzhou.ocd2"
16
+ }]
17
+ }
18
+ }]
19
+ }
chinese_dialect_lexicons/hangzhou_2.ocd2 ADDED
Binary file (427 kB). View file
 
chinese_dialect_lexicons/jiading_2.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Jiading dialect to IPA",
3
+ "segmentation": {
4
+ "type": "mmseg",
5
+ "dict": {
6
+ "type": "ocd2",
7
+ "file": "jiading.ocd2"
8
+ }
9
+ },
10
+ "conversion_chain": [
11
+ {
12
+ "dict": {
13
+ "type": "group",
14
+ "dicts": [
15
+ {
16
+ "type": "ocd2",
17
+ "file": "jiading.ocd2"
18
+ }
19
+ ]
20
+ }
21
+ }
22
+ ]
23
+ }
chinese_dialect_lexicons/jiading_2.ocd2 ADDED
Binary file (111 kB). View file