Upload 103 files
This view is limited to 50 files because it contains too many changes.
- .gitattributes +3 -0
- Dockerfile +37 -0
- LICENSE +21 -0
- LICENSE-MoeGoe +21 -0
- README.md +622 -13
- README_zh.md +619 -0
- app.py +384 -0
- attentions.py +300 -0
- chinese_dialect_lexicons/changzhou.json +23 -0
- chinese_dialect_lexicons/changzhou.ocd2 +0 -0
- chinese_dialect_lexicons/changzhou_3.json +23 -0
- chinese_dialect_lexicons/changzhou_3.ocd2 +0 -0
- chinese_dialect_lexicons/cixi_2.json +23 -0
- chinese_dialect_lexicons/cixi_2.ocd2 +0 -0
- chinese_dialect_lexicons/fuyang_2.json +23 -0
- chinese_dialect_lexicons/fuyang_2.ocd2 +0 -0
- chinese_dialect_lexicons/hangzhou_2.json +19 -0
- chinese_dialect_lexicons/hangzhou_2.ocd2 +0 -0
- chinese_dialect_lexicons/jiading_2.json +23 -0
- chinese_dialect_lexicons/jiading_2.ocd2 +0 -0
- chinese_dialect_lexicons/jiashan_2.json +23 -0
- chinese_dialect_lexicons/jiashan_2.ocd2 +0 -0
- chinese_dialect_lexicons/jingjiang_2.json +23 -0
- chinese_dialect_lexicons/jingjiang_2.ocd2 +0 -0
- chinese_dialect_lexicons/jyutjyu_2.json +19 -0
- chinese_dialect_lexicons/jyutjyu_2.ocd2 +3 -0
- chinese_dialect_lexicons/linping_2.json +23 -0
- chinese_dialect_lexicons/linping_2.ocd2 +0 -0
- chinese_dialect_lexicons/ningbo_2.json +19 -0
- chinese_dialect_lexicons/ningbo_2.ocd2 +0 -0
- chinese_dialect_lexicons/pinghu_2.json +23 -0
- chinese_dialect_lexicons/pinghu_2.ocd2 +0 -0
- chinese_dialect_lexicons/ruao_2.json +23 -0
- chinese_dialect_lexicons/ruao_2.ocd2 +0 -0
- chinese_dialect_lexicons/sanmen_2.json +23 -0
- chinese_dialect_lexicons/sanmen_2.ocd2 +0 -0
- chinese_dialect_lexicons/shaoxing_2.json +23 -0
- chinese_dialect_lexicons/shaoxing_2.ocd2 +0 -0
- chinese_dialect_lexicons/suichang_2.json +23 -0
- chinese_dialect_lexicons/suichang_2.ocd2 +0 -0
- chinese_dialect_lexicons/suzhou_2.json +19 -0
- chinese_dialect_lexicons/suzhou_2.ocd2 +0 -0
- chinese_dialect_lexicons/tiantai_2.json +23 -0
- chinese_dialect_lexicons/tiantai_2.ocd2 +0 -0
- chinese_dialect_lexicons/tongxiang_2.json +23 -0
- chinese_dialect_lexicons/tongxiang_2.ocd2 +0 -0
- chinese_dialect_lexicons/wenzhou_2.json +23 -0
- chinese_dialect_lexicons/wenzhou_2.ocd2 +0 -0
- chinese_dialect_lexicons/wuxi_2.json +19 -0
- chinese_dialect_lexicons/wuxi_2.ocd2 +0 -0
.gitattributes
CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+chinese_dialect_lexicons/jyutjyu_2.ocd2 filter=lfs diff=lfs merge=lfs -text
+chinese_dialect_lexicons/zaonhe_2.ocd2 filter=lfs diff=lfs merge=lfs -text
+chinese_dialect_lexicons/zaonhe.ocd2 filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,37 @@
FROM python:3.10.11-slim-bullseye

RUN mkdir -p /app
WORKDIR /app

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt install build-essential -yq && \
    apt install espeak-ng -yq && \
    apt install cmake -yq && \
    apt install -y wget -yq && \
    apt-get clean && \
    apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
    rm -rf /var/lib/apt/lists/*

RUN pip install MarkupSafe==2.1.2 numpy==1.23.3 cython six==1.16.0

RUN wget https://raw.githubusercontent.com/Artrajz/archived/main/openjtalk/openjtalk-0.3.0.dev2.tar.gz && \
    tar -zxvf openjtalk-0.3.0.dev2.tar.gz && \
    cd openjtalk-0.3.0.dev2 && \
    rm -rf ./pyopenjtalk/open_jtalk_dic_utf_8-1.11 && \
    python setup.py install && \
    cd ../ && \
    rm -f openjtalk-0.3.0.dev2.tar.gz && \
    rm -rf openjtalk-0.3.0.dev2

RUN pip install torch --index-url https://download.pytorch.org/whl/cpu

COPY requirements.txt /app
RUN pip install -r requirements.txt

COPY . /app

EXPOSE 23456

CMD ["python", "/app/app.py"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Artrajz

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
LICENSE-MoeGoe
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022 CjangCjengh

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,13 +1,622 @@
<div class="title" align=center>
    <h1>vits-simple-api</h1>
    <div>Simply call the vits api</div>
    <br/>
    <br/>
    <p>
        <img src="https://img.shields.io/github/license/Artrajz/vits-simple-api">
        <img src="https://img.shields.io/badge/python-3.9%7C3.10-green">
        <a href="https://hub.docker.com/r/artrajz/vits-simple-api">
            <img src="https://img.shields.io/docker/pulls/artrajz/vits-simple-api"></a>
    </p>
    <a href="https://github.com/Artrajz/vits-simple-api/blob/main/README.md">English</a>|<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README_zh.md">中文文档</a>
    <br/>
</div>

# Feature

- [x] VITS text-to-speech
- [x] VITS voice conversion
- [x] HuBert-soft VITS
- [x] W2V2 VITS / emotional-vits dimensional emotion model
- [x] Support for loading multiple models
- [x] Automatic language recognition and processing; the language-detection scope is set according to the model's cleaner, and custom language ranges are supported
- [x] Customizable default parameters
- [x] Long text batch processing
- [x] GPU accelerated inference
- [x] SSML (Speech Synthesis Markup Language) work in progress...

<details><summary>Update Logs</summary><pre><code>
<h2>2023.5.24</h2>
<p>Added the dimensional_emotion API, which loads multiple npy files from a folder. Docker images now support the linux/arm64 and linux/arm64/v8 platforms.</p>
<h2>2023.5.15</h2>
<p>Added english_cleaner. To use it, you need to install espeak separately.</p>
<h2>2023.5.12</h2>
<p>Added support for SSML, but it still needs improvement. Refactored some functions and changed "speaker_id" to "id" in hubert_vits.</p>
<h2>2023.5.2</h2>
<p>Added support for the w2v2-vits/emotional-vits model, updated the speakers mapping table, and added support for the languages corresponding to the model.</p>
<h2>2023.4.23</h2>
<p>Added API key authentication, disabled by default; it needs to be enabled in config.py.</p>
<h2>2023.4.17</h2>
<p>Changed single-language cleaners so that text is cleaned only when annotated, and added GPU-accelerated inference, although the GPU inference environment needs to be installed manually.</p>
<h2>2023.4.12</h2>
<p>Renamed the project from MoeGoe-Simple-API to vits-simple-api, added support for batch processing of long texts, and added a segmentation threshold "max" for long texts.</p>
<h2>2023.4.7</h2>
<p>Added a configuration file to customize default parameters. This update requires manually updating config.py. See config.py for specific usage.</p>
<h2>2023.4.6</h2>
<p>Added the "auto" option for automatically recognizing the language of the text and changed the default value of the "lang" parameter to "auto". Automatic recognition still has some defects, so please choose the language manually when needed.</p>
<p>Unified the POST request type as multipart/form-data.</p>
</code></pre></details>
## demo

- `https://api.artrajz.cn/py/voice/vits?text=你好,こんにちは&id=142`
- excited: `https://api.artrajz.cn/py/voice/w2v2-vits?text=こんにちは&id=3&emotion=111`
- whispered: `https://api.artrajz.cn/py/voice/w2v2-vits?text=こんにちは&id=3&emotion=2077`

https://user-images.githubusercontent.com/73542220/237995061-c1f25b4e-dd86-438a-9363-4bb1fe65b425.mov

The demo server is unstable due to its relatively low configuration.

# Deploy

## Docker

### Docker image pull script

```
bash -c "$(wget -O- https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/vits-simple-api-installer-latest.sh)"
```

- The platforms currently supported by the Docker images are `linux/amd64` and `linux/arm64`.
- After a successful pull, the VITS model needs to be imported before use. Please follow the steps below to import the model.

### Download VITS model

Put the model into `/usr/local/vits-simple-api/Model`

<details><summary>Folder structure</summary><pre><code>
│  hubert-soft-0d54a1f4.pt
│  model.onnx
│  model.yaml
│
├─g
│      config.json
│      G_953000.pth
│
├─louise
│      360_epochs.pth
│      config.json
│
├─Nene_Nanami_Rong_Tang
│      1374_epochs.pth
│      config.json
│
├─Zero_no_tsukaima
│      1158_epochs.pth
│      config.json
│
└─npy
       25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
       all_emotions.npy
</code></pre></details>

### Modify model path

Modify in `/usr/local/vits-simple-api/config.py`

<details><summary>config.py</summary><pre><code>
# Fill in the model paths here
MODEL_LIST = [
    # VITS
    [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
    [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
    [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
    # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
    [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
    # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
    [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
]
# hubert-vits: hubert soft model
HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
# w2v2-vits: dimensional emotion npy file
# load a single npy: ABS_PATH + "/all_emotions.npy"
# load multiple npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
# load multiple npy from a folder: ABS_PATH + "/Model/npy"
DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
# w2v2-vits: both `model.onnx` and `model.yaml` must be in the same path.
DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
</code></pre></details>

### Startup

`docker compose up -d`

Or execute the pull script again.

### Image update

Run the Docker image pull script again.

## Virtual environment deployment

### Clone

`git clone https://github.com/Artrajz/vits-simple-api.git`

### Download python dependencies

A Python virtual environment is recommended; use Python >= 3.9.

`pip install -r requirements.txt`

fasttext may fail to install on Windows. You can install it with the following commands, or download the wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#fasttext).

```
# python3.10 win_amd64
pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp310-cp310-win_amd64.whl
# python3.9 win_amd64
pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp39-cp39-win_amd64.whl
```

### Download VITS model

Put the model into `/path/to/vits-simple-api/Model`

<details><summary>Folder structure</summary><pre><code>
│  hubert-soft-0d54a1f4.pt
│  model.onnx
│  model.yaml
│
├─g
│      config.json
│      G_953000.pth
│
├─louise
│      360_epochs.pth
│      config.json
│
├─Nene_Nanami_Rong_Tang
│      1374_epochs.pth
│      config.json
│
├─Zero_no_tsukaima
│      1158_epochs.pth
│      config.json
│
└─npy
       25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
       all_emotions.npy
</code></pre></details>

### Modify model path

Modify in `/path/to/vits-simple-api/config.py`

<details><summary>config.py</summary><pre><code>
# Fill in the model paths here
MODEL_LIST = [
    # VITS
    [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
    [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
    [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
    # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
    [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
    # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
    [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
]
# hubert-vits: hubert soft model
HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
# w2v2-vits: dimensional emotion npy file
# load a single npy: ABS_PATH + "/all_emotions.npy"
# load multiple npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
# load multiple npy from a folder: ABS_PATH + "/Model/npy"
DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
# w2v2-vits: both `model.onnx` and `model.yaml` must be in the same path.
DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
</code></pre></details>

### Startup

`python app.py`

# GPU accelerated

## Windows

### Install CUDA

Check the highest CUDA version supported by your graphics card:

```
nvidia-smi
```

Taking CUDA 11.7 as an example, download it from the [official website](https://developer.nvidia.com/cuda-11-7-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local).

### Install GPU version of PyTorch

```
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
```

You can find the corresponding command for the version you need on the [official website](https://pytorch.org/get-started/locally/).

## Linux

The installation process is similar, but I don't have the environment to test it.

# Openjtalk Installation Issue

If you are using an arm64 platform, you may encounter some issues during installation because the official PyPI site lacks arm64-compatible whl files. In that case, you can install Openjtalk from the whl I have built:

```
pip install openjtalk==0.3.0.dev2 --index-url https://pypi.artrajz.cn/simple
```

Alternatively, you can build a whl manually by following this [tutorial](https://artrajz.cn/index.php/archives/167/).

# API

## GET

#### speakers list

- GET http://127.0.0.1:23456/voice/speakers

Returns the mapping table of role IDs to speaker names.

#### voice vits

- GET http://127.0.0.1/voice?text=text

Default values are used when other parameters are not specified.

- GET http://127.0.0.1/voice?text=[ZH]text[ZH][JA]text[JA]&lang=mix

When lang=mix, the text needs to be annotated.

- GET http://127.0.0.1/voice?text=text&id=142&format=wav&lang=zh&length=1.4

The text is "text", the role ID is 142, the audio format is wav, the text language is zh, the speech length is 1.4, and the other parameters use their defaults.
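
For a quick check from Python, the same GET call can be made with `requests`. A minimal sketch, assuming the server is running locally on port 23456 and a speaker with id 142 is loaded:

```python
import requests

# Hedged example: host, port, and the speaker id are assumptions about your setup.
params = {"text": "你好", "id": 142, "format": "wav", "lang": "zh"}
res = requests.get("http://127.0.0.1:23456/voice", params=params)

# The synthesized audio is returned as the response body.
with open("output.wav", "wb") as f:
    f.write(res.content)
```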

#### check

- GET http://127.0.0.1:23456/voice/check?id=0&model=vits

## POST

- python

```python
import re
import requests
import os
import random
import string
from requests_toolbelt.multipart.encoder import MultipartEncoder

abs_path = os.path.dirname(__file__)
base = "http://127.0.0.1:23456"


# Speaker mapping table
def voice_speakers():
    url = f"{base}/voice/speakers"

    res = requests.post(url=url)
    json = res.json()
    for i in json:
        print(i)
        for j in json[i]:
            print(j)
    return json


# Text-to-speech: voice vits
def voice_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50):
    fields = {
        "text": text,
        "id": str(id),
        "format": format,
        "lang": lang,
        "length": str(length),
        "noise": str(noise),
        "noisew": str(noisew),
        "max": str(max)
    }
    boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))

    m = MultipartEncoder(fields=fields, boundary=boundary)
    headers = {"Content-Type": m.content_type}
    url = f"{base}/voice"

    res = requests.post(url=url, data=m, headers=headers)
    fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
    path = f"{abs_path}/{fname}"

    with open(path, "wb") as f:
        f.write(res.content)
    print(path)
    return path


# Voice conversion: hubert-vits
def voice_hubert_vits(upload_path, id, format="wav", length=1, noise=0.667, noisew=0.8):
    upload_name = os.path.basename(upload_path)
    upload_type = f'audio/{upload_name.split(".")[1]}'  # wav,ogg

    with open(upload_path, 'rb') as upload_file:
        fields = {
            "upload": (upload_name, upload_file, upload_type),
            "id": str(id),
            "format": format,
            "length": str(length),
            "noise": str(noise),
            "noisew": str(noisew),
        }
        boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))

        m = MultipartEncoder(fields=fields, boundary=boundary)
        headers = {"Content-Type": m.content_type}
        url = f"{base}/voice/hubert-vits"

        res = requests.post(url=url, data=m, headers=headers)

    fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
    path = f"{abs_path}/{fname}"

    with open(path, "wb") as f:
        f.write(res.content)
    print(path)
    return path


# Dimensional emotion model: w2v2-vits
def voice_w2v2_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, emotion=0):
    fields = {
        "text": text,
        "id": str(id),
        "format": format,
        "lang": lang,
        "length": str(length),
        "noise": str(noise),
        "noisew": str(noisew),
        "max": str(max),
        "emotion": str(emotion)
    }
    boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))

    m = MultipartEncoder(fields=fields, boundary=boundary)
    headers = {"Content-Type": m.content_type}
    url = f"{base}/voice/w2v2-vits"

    res = requests.post(url=url, data=m, headers=headers)
    fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
    path = f"{abs_path}/{fname}"

    with open(path, "wb") as f:
        f.write(res.content)
    print(path)
    return path


# Voice conversion between roles within the same VITS model
def voice_conversion(upload_path, original_id, target_id):
    upload_name = os.path.basename(upload_path)
    upload_type = f'audio/{upload_name.split(".")[1]}'  # wav,ogg

    with open(upload_path, 'rb') as upload_file:
        fields = {
            "upload": (upload_name, upload_file, upload_type),
            "original_id": str(original_id),
            "target_id": str(target_id),
        }
        boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
        m = MultipartEncoder(fields=fields, boundary=boundary)

        headers = {"Content-Type": m.content_type}
        url = f"{base}/voice/conversion"

        res = requests.post(url=url, data=m, headers=headers)

    fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
    path = f"{abs_path}/{fname}"

    with open(path, "wb") as f:
        f.write(res.content)
    print(path)
    return path


def voice_ssml(ssml):
    fields = {
        "ssml": ssml,
    }
    boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))

    m = MultipartEncoder(fields=fields, boundary=boundary)
    headers = {"Content-Type": m.content_type}
    url = f"{base}/voice/ssml"

    res = requests.post(url=url, data=m, headers=headers)
    fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
    path = f"{abs_path}/{fname}"

    with open(path, "wb") as f:
        f.write(res.content)
    print(path)
    return path


def voice_dimensional_emotion(upload_path):
    upload_name = os.path.basename(upload_path)
    upload_type = f'audio/{upload_name.split(".")[1]}'  # wav,ogg

    with open(upload_path, 'rb') as upload_file:
        fields = {
            "upload": (upload_name, upload_file, upload_type),
        }
        boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))

        m = MultipartEncoder(fields=fields, boundary=boundary)
        headers = {"Content-Type": m.content_type}
        url = f"{base}/voice/dimension-emotion"

        res = requests.post(url=url, data=m, headers=headers)

    fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
    path = f"{abs_path}/{fname}"

    with open(path, "wb") as f:
        f.write(res.content)
    print(path)
    return path
```
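
A hedged usage sketch of the helpers above; the speaker ids and file names are placeholders, not values from the project:

```python
# Placeholder ids/paths; substitute speakers that exist in your MODEL_LIST.
voice_speakers()                              # print the id -> speaker mapping
voice_vits("你好,こんにちは", id=142)          # synthesize speech and save it locally
voice_conversion("record.wav", original_id=142, target_id=121)
```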

## API KEY

Set `API_KEY_ENABLED = True` in `config.py` to enable API key authentication, and set the key itself via `API_KEY = "api-key"`.
After enabling it, you need to add the `api_key` parameter to GET requests and add the `X-API-KEY` field to the header of POST requests.
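
A minimal sketch of authenticated requests, assuming the key was left at the default `"api-key"`:

```python
import requests

base = "http://127.0.0.1:23456"
api_key = "api-key"  # must match API_KEY in config.py

# GET: pass the key as the api_key query parameter
res = requests.get(f"{base}/voice", params={"text": "text", "id": 0, "api_key": api_key})

# POST: pass the key in the X-API-KEY header
res = requests.post(f"{base}/voice/speakers", headers={"X-API-KEY": api_key})
```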

# Parameter

## VITS

| Name | Parameter | Is must | Default | Type | Instruction |
| ---------------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
| Synthesized text | text | true | | str | |
| Role ID | id | false | 0 | int | |
| Audio format | format | false | wav | str | Support for wav,ogg,silk |
| Text language | lang | false | auto | str | The language of the text to be synthesized. Available options include auto, zh, ja, and mix. When lang=mix, the text should be wrapped in [ZH] or [JA]. The default mode is auto, which automatically detects the language of the text. |
| Audio length | length | false | 1.0 | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting its speed. The larger the value, the slower the speech. |
| Noise | noise | false | 0.667 | float | |
| Noise Weight | noisew | false | 0.8 | float | |
| Segmentation threshold | max | false | 50 | int | Splits the text into segments at punctuation marks; pieces are merged into one segment until their combined length exceeds max. If max<=0, the text is not segmented. |
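
As an illustration of the `max` rule described above (not the project's actual implementation), segmentation can be sketched like this:

```python
import re

# Illustrative sketch: split at punctuation, then greedily merge pieces
# while the running length stays at or below max_len.
def segment(text, max_len=50):
    if max_len <= 0:
        return [text]
    pieces = [p for p in re.split(r"(?<=[,。!?,.!?])", text) if p]
    segments, current = [], ""
    for piece in pieces:
        if current and len(current) + len(piece) > max_len:
            segments.append(current)
            current = piece
        else:
            current += piece
    if current:
        segments.append(current)
    return segments

print(segment("你好。今晚月色真美!要不要一起散步?", max_len=10))
```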

## VITS voice conversion

| Name | Parameter | Is must | Default | Type | Instruction |
| -------------- | ----------- | ------- | ------- | ---- | --------------------------------------------------------- |
| Uploaded Audio | upload | true | | file | The audio file to be uploaded. It should be in wav or ogg format. |
| Source Role ID | original_id | true | | int | The role ID of the uploaded audio. |
| Target Role ID | target_id | true | | int | The ID of the target role to convert the audio to. |

## HuBert-VITS

| Name | Parameter | Is must | Default | Type | Instruction |
| -------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
| Uploaded Audio | upload | true | | file | The audio file to be uploaded. It should be in wav or ogg format. |
| Target Role ID | id | true | | int | |
| Audio format | format | true | | str | wav,ogg,silk |
| Audio length | length | true | | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting its speed. The larger the value, the slower the speech. |
| Noise | noise | true | | float | |
| Noise Weight | noisew | true | | float | |
## W2V2-VITS

| Name | Parameter | Is must | Default | Type | Instruction |
| ---------------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
| Synthesized text | text | true | | str | |
| Role ID | id | false | 0 | int | |
| Audio format | format | false | wav | str | Support for wav,ogg,silk |
| Text language | lang | false | auto | str | The language of the text to be synthesized. Available options include auto, zh, ja, and mix. When lang=mix, the text should be wrapped in [ZH] or [JA]. The default mode is auto, which automatically detects the language of the text. |
| Audio length | length | false | 1.0 | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting its speed. The larger the value, the slower the speech. |
| Noise | noise | false | 0.667 | float | |
| Noise Weight | noisew | false | 0.8 | float | |
| Segmentation threshold | max | false | 50 | int | Splits the text into segments at punctuation marks; pieces are merged into one segment until their combined length exceeds max. If max<=0, the text is not segmented. |
| Dimensional emotion | emotion | false | 0 | int | The valid range depends on the emotion reference file in npy format; for example, [innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)'s all_emotions.npy covers 0-5457. |
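
The valid `emotion` range can be read straight from the reference file; a small sketch, assuming the npy stores one dimensional-emotion vector per row as described above:

```python
import numpy as np

# Assumption: all_emotions.npy holds one emotion vector per row,
# so valid emotion indices run from 0 to len(emotions) - 1.
emotions = np.load("all_emotions.npy")
print(f"valid emotion range: 0-{len(emotions) - 1}")
```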

## Dimensional emotion

| Name | Parameter | Is must | Default | Type | Instruction |
| -------------- | --------- | ------- | ------- | ---- | ------------------------------------------------------------ |
| Uploaded Audio | upload | true | | file | Returns the npy file that stores the dimensional emotion vector. |

## SSML (Speech Synthesis Markup Language)

Supported Elements and Attributes

`speak` Element

| Attribute | Instruction | Is must |
| --------- | ------------------------------------------------------------ | ------- |
| id | Default value is retrieved from `config.py` | false |
| lang | Default value is retrieved from `config.py` | false |
| length | Default value is retrieved from `config.py` | false |
| noise | Default value is retrieved from `config.py` | false |
| noisew | Default value is retrieved from `config.py` | false |
| max | Splits text into segments based on punctuation marks. When the sum of segment lengths exceeds `max`, it is treated as one segment. `max<=0` means no segmentation. The default value is 0. | false |
| model | Default is `vits`. Options: `w2v2-vits`, `emotion-vits` | false |
| emotion | Only effective when using `w2v2-vits` or `emotion-vits`. The range depends on the npy emotion reference file. | false |

`voice` Element

Higher priority than `speak`.

| Attribute | Instruction | Is must |
| --------- | ------------------------------------------------------------ | ------- |
| id | Default value is retrieved from `config.py` | false |
| lang | Default value is retrieved from `config.py` | false |
| length | Default value is retrieved from `config.py` | false |
| noise | Default value is retrieved from `config.py` | false |
| noisew | Default value is retrieved from `config.py` | false |
| max | Splits text into segments based on punctuation marks. When the sum of segment lengths exceeds `max`, it is treated as one segment. `max<=0` means no segmentation. The default value is 0. | false |
| model | Default is `vits`. Options: `w2v2-vits`, `emotion-vits` | false |
| emotion | Only effective when using `w2v2-vits` or `emotion-vits` | false |

`break` Element

| Attribute | Instruction | Is must |
| --------- | ------------------------------------------------------------ | ------- |
| strength | x-weak, weak, medium (default), strong, x-strong | false |
| time | The absolute duration of a pause, in seconds (such as `2s`) or milliseconds (such as `500ms`). Valid values range from 0 to 5000 milliseconds. If you set a value greater than the supported maximum, the service uses `5000ms`. If the `time` attribute is set, the `strength` attribute is ignored. | false |

| Strength | Relative Duration |
| :------- | :---------------- |
| x-weak | 250 ms |
| weak | 500 ms |
| medium | 750 ms |
| strong | 1000 ms |
| x-strong | 1250 ms |

Example

```xml
<speak lang="zh" format="mp3" length="1.2">
    <voice id="92">这几天心里颇不宁静。</voice>
    <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
    <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
    <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
    <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
    <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
    <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
    <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
    <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
    <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
    <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
    <voice id="127">路上只我一个人,背着手踱着。</voice>
    <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
    <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
    <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
    <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
    <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
</speak>
```
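
The document above can be submitted through the `voice_ssml` helper from the POST section; a minimal sketch with a shortened payload:

```python
# Minimal sketch reusing the voice_ssml helper defined in the POST examples.
ssml = """<speak lang="zh" format="mp3" length="1.2">
    <voice id="92">这几天心里颇不宁静。</voice><break time="500ms"/>
    <voice id="125">今晚在院子里坐着乘凉。</voice>
</speak>"""
voice_ssml(ssml)  # saves the returned audio next to the script and returns its path
```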

# Communication

For learning and communication; currently there is only a Chinese [QQ group](https://qm.qq.com/cgi-bin/qm/qr?k=-1GknIe4uXrkmbDKBGKa1aAUteq40qs_&jump_from=webapi&authKey=x5YYt6Dggs1ZqWxvZqvj3fV8VUnxRyXm5S5Kzntc78+Nv3iXOIawplGip9LWuNR/).

# Acknowledgements

- vits: https://github.com/jaywalnut310/vits
- MoeGoe: https://github.com/CjangCjengh/MoeGoe
- emotional-vits: https://github.com/innnky/emotional-vits
- vits-uma-genshin-honkai: https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai
README_zh.md
ADDED
@@ -0,0 +1,619 @@
1 |
+
<div class="title" align=center>
|
2 |
+
<h1>vits-simple-api</h1>
|
3 |
+
<div>Simply call the vits api</div>
|
4 |
+
<br/>
|
5 |
+
<br/>
|
6 |
+
<p>
|
7 |
+
<img src="https://img.shields.io/github/license/Artrajz/vits-simple-api">
|
8 |
+
<img src="https://img.shields.io/badge/python-3.9%7C3.10-green">
|
9 |
+
<a href="https://hub.docker.com/r/artrajz/vits-simple-api">
|
10 |
+
<img src="https://img.shields.io/docker/pulls/artrajz/vits-simple-api"></a>
|
11 |
+
</p>
|
12 |
+
<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README.md">English</a>|<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README_zh.md">中文文档</a>
|
13 |
+
<br/>
|
14 |
+
</div>
|
15 |
+
|
16 |
+
|
17 |
+
|
18 |
+
|
19 |
+
# Feature
|
20 |
+
|
21 |
+
- [x] VITS语音合成
|
22 |
+
- [x] VITS语音转换
|
23 |
+
- [x] HuBert-soft VITS模型
|
24 |
+
- [x] W2V2 VITS / emotional-vits维度情感模型
|
25 |
+
- [x] 加载多模型
|
26 |
+
- [x] 自动识别语言并处理,根据模型的cleaner设置语言类型识别的范围,支持自定义语言类型范围
|
27 |
+
- [x] 自定义默认参数
|
28 |
+
- [x] 长文本批处理
|
29 |
+
- [x] GPU加速推理
|
30 |
+
- [x] SSML语音合成标记语言(完善中...)
|
31 |
+
|
32 |
+
<details><summary>Update Logs</summary><pre><code>
|
33 |
+
<h2>2023.5.24</h2>
|
34 |
+
<p>添加dimensional_emotion api,从文件夹加载多个npy文件,Docker添加了Linux/ARM64和Linux/ARM64/v8平台</p>
|
35 |
+
<h2>2023.5.15</h2>
|
36 |
+
<p>增加english_cleaner,需要额外安装espeak才能使用</p>
|
37 |
+
<h2>2023.5.12</h2>
|
38 |
+
<p>增加ssml支持,但仍需完善。重构部分功能,hubert_vits中的speaker_id改为id</p>
|
39 |
+
<h2>2023.5.2</h2>
|
40 |
+
<p>增加w2v2-vits/emotional-vits模型支持,修改了speakers映射表并添加了对应模型支持的语言</p>
|
41 |
+
<h2>2023.4.23</h2>
|
42 |
+
<p>增加api key鉴权,默认禁用,需要在config.py中启用</p>
|
43 |
+
<h2>2023.4.17</h2>
|
44 |
+
<p>修改单语言的cleaner需要标注才会clean,增加GPU加速推理,但需要手动安装gpu推理环境</p>
|
45 |
+
<h2>2023.4.12</h2>
|
46 |
+
<p>项目由MoeGoe-Simple-API更名为vits-simple-api,支持长文本批处理,增加长文本分段阈值max</p>
|
47 |
+
<h2>2023.4.7</h2>
|
48 |
+
<p>增加配置文件可自定义默认参数,本次更新需要手动更新config.py,具体使用方法见config.py</p>
|
49 |
+
<h2>2023.4.6</h2>
|
50 |
+
<p>加入自动识别语种选项auto,lang参数默认修改为auto,自动识别仍有一定缺陷,请自行选择</p>
|
51 |
+
<p>统一POST请求类型为multipart/form-data</p>
|
52 |
+
</code></pre></details>
|
53 |
+
|
54 |
+
|
55 |
+
|
56 |
+
## demo
|
57 |
+
|
58 |
+
- `https://api.artrajz.cn/py/voice/vits?text=你好,こんにちは&id=142`
|
59 |
+
- 激动:`https://api.artrajz.cn/py/voice/w2v2-vits?text=こんにちは&id=3&emotion=111`
|
60 |
+
- 小声:`https://api.artrajz.cn/py/voice/w2v2-vits?text=こんにちは&id=3&emotion=2077`
|
61 |
+
|
62 |
+
https://user-images.githubusercontent.com/73542220/237995061-c1f25b4e-dd86-438a-9363-4bb1fe65b425.mov
|
63 |
+
|
64 |
+
demo服务器配置比较低所以不稳定
|
65 |
+
|
66 |
+
# 部署
|
67 |
+
|
68 |
+
## Docker部署
|
69 |
+
|
70 |
+
### 镜像拉取脚本
|
71 |
+
|
72 |
+
```
|
73 |
+
bash -c "$(wget -O- https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/vits-simple-api-installer-latest.sh)"
|
74 |
+
```
|
75 |
+
|
76 |
+
- 目前docker镜像支持的平台`linux/amd64,linux/arm64`
|
77 |
+
- 在拉取完成后,需要导入VITS模型才能使用,请根据以下步骤导入模型。
|
78 |
+
|
79 |
+
### 下载VITS模型
|
80 |
+
|
81 |
+
将模型放入`/usr/local/vits-simple-api/Model`
|
82 |
+
|
83 |
+
<details><summary>Folder structure</summary><pre><code>
|
84 |
+
│ hubert-soft-0d54a1f4.pt
|
85 |
+
│ model.onnx
|
86 |
+
│ model.yaml
|
87 |
+
├─g
|
88 |
+
│ config.json
|
89 |
+
│ G_953000.pth
|
90 |
+
│
|
91 |
+
├─louise
|
92 |
+
│ 360_epochs.pth
|
93 |
+
│ config.json
|
94 |
+
│
|
95 |
+
├─Nene_Nanami_Rong_Tang
|
96 |
+
│ 1374_epochs.pth
|
97 |
+
│ config.json
|
98 |
+
│
|
99 |
+
├─Zero_no_tsukaima
|
100 |
+
│ 1158_epochs.pth
|
101 |
+
│ config.json
|
102 |
+
│
|
103 |
+
└─npy
|
104 |
+
25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
|
105 |
+
all_emotions.npy
|
106 |
+
</code></pre></details>
|
107 |
+
|
108 |
+
|
109 |
+
|
110 |
+
### 修改模型路径
|
111 |
+
|
112 |
+
Modify in `/usr/local/vits-simple-api/config.py`
|
113 |
+
|
114 |
+
<details><summary>config.py</summary><pre><code>
|
115 |
+
# 在此填写模型路径
|
116 |
+
MODEL_LIST = [
|
117 |
+
# VITS
|
118 |
+
[ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
|
119 |
+
[ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
|
120 |
+
[ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
|
121 |
+
# HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
|
122 |
+
[ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
|
123 |
+
# W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
|
124 |
+
[ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
|
125 |
+
]
|
126 |
+
# hubert-vits: hubert soft 编码器
|
127 |
+
HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
|
128 |
+
# w2v2-vits: Dimensional emotion npy file
|
129 |
+
# 加载单独的npy: ABS_PATH+"/all_emotions.npy
|
130 |
+
# 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
|
131 |
+
# 从文件夹里加载npy: ABS_PATH + "/Model/npy"
|
132 |
+
DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
|
133 |
+
# w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
|
134 |
+
DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
|
135 |
+
</code></pre></details>
|
136 |
+
|
137 |
+
|
138 |
+
|
139 |
+
### 启动
|
140 |
+
|
141 |
+
`docker compose up -d`
|
142 |
+
|
143 |
+
或者重新执行拉取脚本
|
144 |
+
|
145 |
+
### 镜像更新
|
146 |
+
|
147 |
+
重新执行docker镜像拉取脚本即可
|
148 |
+
|
149 |
+
## 虚拟环境部署
|
150 |
+
|
151 |
+
### Clone
|
152 |
+
|
153 |
+
`git clone https://github.com/Artrajz/vits-simple-api.git`
|
154 |
+
|
155 |
+
### 下载python依赖
|
156 |
+
|
157 |
+
推荐使用python的虚拟环境,python版本 >= 3.9
|
158 |
+
|
159 |
+
`pip install -r requirements.txt`
|
160 |
+
|
161 |
+
windows下可能安装不了fasttext,可以用以下命令安装,附[wheels下载地址](https://www.lfd.uci.edu/~gohlke/pythonlibs/#fasttext)
|
162 |
+
|
163 |
+
```
|
164 |
+
#python3.10 win_amd64
|
165 |
+
pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp310-cp310-win_amd64.whl
|
166 |
+
#python3.9 win_amd64
|
167 |
+
pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp39-cp39-win_amd64.whl
|
168 |
+
```
|
169 |
+
|
170 |
+
### 下载VITS模型
|
171 |
+
|
172 |
+
将模型放入 `/path/to/vits-simple-api/Model`
|
173 |
+
|
174 |
+
<details><summary>文件夹结构</summary><pre><code>
|
175 |
+
├─g
|
176 |
+
│ config.json
|
177 |
+
│ G_953000.pth
|
178 |
+
│
|
179 |
+
├─louise
|
180 |
+
│ 360_epochs.pth
|
181 |
+
│ config.json
|
182 |
+
│ hubert-soft-0d54a1f4.pt
|
183 |
+
│
|
184 |
+
├─Nene_Nanami_Rong_Tang
|
185 |
+
│ 1374_epochs.pth
|
186 |
+
│ config.json
|
187 |
+
│
|
188 |
+
└─Zero_no_tsukaima
|
189 |
+
1158_epochs.pth
|
190 |
+
config.json
|
191 |
+
</code></pre></details>
|
192 |
+
|
193 |
+
### 修改模型路径
|
194 |
+
|
195 |
+
在 `/path/to/vits-simple-api/config.py` 修改
|
196 |
+
|
197 |
+
<details><summary>config.py</summary><pre><code>
|
198 |
+
# 在此填写模型路径
|
199 |
+
MODEL_LIST = [
|
200 |
+
# VITS
|
201 |
+
[ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
|
202 |
+
[ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
|
203 |
+
[ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
|
204 |
+
# HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
|
205 |
+
[ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
|
206 |
+
# W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
|
207 |
+
[ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
|
208 |
+
]
|
209 |
+
# hubert-vits: hubert soft 编码器
|
210 |
+
HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
|
211 |
+
# w2v2-vits: Dimensional emotion npy file
|
212 |
+
# 加载单独的npy: ABS_PATH+"/all_emotions.npy
|
213 |
+
# 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
|
214 |
+
# 从文件夹里加载npy: ABS_PATH + "/Model/npy"
|
215 |
+
DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
|
216 |
+
# w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
|
217 |
+
DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
|
218 |
+
</code></pre></details>
|
219 |
+
|
220 |
+
|
221 |
+
|
222 |
+
### 启动
|
223 |
+
|
224 |
+
`python app.py`
|
225 |
+
|
226 |
+
# GPU 加速
|
227 |
+
|
228 |
+
## windows
|
229 |
+
|
230 |
+
### 安装CUDA
|
231 |
+
|
232 |
+
查看显卡最高支持CUDA的版本
|
233 |
+
|
234 |
+
```
|
235 |
+
nvidia-smi
|
236 |
+
```
|
237 |
+
|
238 |
+
以CUDA11.7为例,[官网](https://developer.nvidia.com/cuda-11-7-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local)
|
239 |
+
|
240 |
+
### 安装GPU版pytorch
|
241 |
+
|
242 |
+
CUDA11.7对应的pytorch是用这个命令安装
|
243 |
+
|
244 |
+
```
|
245 |
+
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
|
246 |
+
```
|
247 |
+
|
248 |
+
对应版本的命令可以在[官网](https://pytorch.org/get-started/locally/)找到
|
249 |
+
|
250 |
+
## Linux
|
251 |
+
|
252 |
+
安装过程类似,但我没有相应的环境所以没办法测试
|
253 |
+
|
254 |
+
# Openjtalk安装问题
|
255 |
+
|
256 |
+
如果你是arm64架构的平台,由于pypi官网上没有arm64对应的whl,可能安装会出现一些问题,你可以使用我构建的whl来安装
|
257 |
+
|
258 |
+
```
|
259 |
+
pip install openjtalk==0.3.0.dev2 --index-url https://pypi.artrajz.cn/simple
|
260 |
+
```
|
261 |
+
|
262 |
+
或者是自己手动构建一个whl,可以根据[教程](https://artrajz.cn/index.php/archives/167/)来构建
|
263 |
+
|
264 |
+
# API
|
265 |
+
|
266 |
+
## GET
|
267 |
+
|
268 |
+
#### speakers list
|
269 |
+
|
270 |
+
- GET http://127.0.0.1:23456/voice/speakers
|
271 |
+
|
272 |
+
返回id对应角色的映射表
|
273 |
+
|
274 |
+
#### voice vits
|
275 |
+
|
276 |
+
- GET http://127.0.0.1/voice?text=text
|
277 |
+
|
278 |
+
其他参数不指定时均为默认值
|
279 |
+
|
280 |
+
- GET http://127.0.0.1/voice?text=[ZH]text[ZH][JA]text[JA]&lang=mix
|
281 |
+
|
282 |
+
lang=mix时文本要标注
|
283 |
+
|
284 |
+
- GET http://127.0.0.1/voice?text=text&id=142&format=wav&lang=zh&length=1.4
|
285 |
+
|
286 |
+
文本为text,角色id为142,音频格式为wav,文本语言为zh,语音长度为1.4,其余参数默认
|
287 |
+
|
288 |
+
#### check
|
289 |
+
|
290 |
+
- GET http://127.0.0.1:23456/voice/check?id=0&model=vits
|
291 |
+
|
292 |
+
## POST
|
293 |
+
|
294 |
+
- python
|
295 |
+
|
296 |
+
```python
|
297 |
+
import re
|
298 |
+
import requests
|
299 |
+
import os
|
300 |
+
import random
|
301 |
+
import string
|
302 |
+
from requests_toolbelt.multipart.encoder import MultipartEncoder
|
303 |
+
|
304 |
+
abs_path = os.path.dirname(__file__)
|
305 |
+
base = "http://127.0.0.1:23456"
|
306 |
+
|
307 |
+
|
308 |
+
# 映射表
|
309 |
+
def voice_speakers():
|
310 |
+
url = f"{base}/voice/speakers"
|
311 |
+
|
312 |
+
res = requests.post(url=url)
|
313 |
+
json = res.json()
|
314 |
+
for i in json:
|
315 |
+
print(i)
|
316 |
+
for j in json[i]:
|
317 |
+
print(j)
|
318 |
+
return json
|
319 |
+
|
320 |
+
|
321 |
+
# 语音合成 voice vits
|
322 |
+
def voice_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50):
|
323 |
+
fields = {
|
324 |
+
"text": text,
|
325 |
+
"id": str(id),
|
326 |
+
"format": format,
|
327 |
+
"lang": lang,
|
328 |
+
"length": str(length),
|
329 |
+
"noise": str(noise),
|
330 |
+
"noisew": str(noisew),
|
331 |
+
"max": str(max)
|
332 |
+
}
|
333 |
+
boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
|
334 |
+
|
335 |
+
m = MultipartEncoder(fields=fields, boundary=boundary)
|
336 |
+
headers = {"Content-Type": m.content_type}
|
337 |
+
url = f"{base}/voice"
|
338 |
+
|
339 |
+
res = requests.post(url=url, data=m, headers=headers)
|
340 |
+
fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
|
341 |
+
path = f"{abs_path}/{fname}"
|
342 |
+
|
343 |
+
with open(path, "wb") as f:
|
344 |
+
f.write(res.content)
|
345 |
+
print(path)
|
346 |
+
return path
|
347 |
+
|
348 |
+
|
349 |
+
# 语音转换 hubert-vits
|
350 |
+
def voice_hubert_vits(upload_path, id, format="wav", length=1, noise=0.667, noisew=0.8):
|
351 |
+
upload_name = os.path.basename(upload_path)
|
352 |
+
upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
|
353 |
+
|
354 |
+
with open(upload_path, 'rb') as upload_file:
|
355 |
+
fields = {
|
356 |
+
"upload": (upload_name, upload_file, upload_type),
|
357 |
+
"id": str(id),
|
358 |
+
"format": format,
|
359 |
+
"length": str(length),
|
360 |
+
"noise": str(noise),
|
361 |
+
"noisew": str(noisew),
|
362 |
+
}
|
363 |
+
boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
|
364 |
+
|
365 |
+
m = MultipartEncoder(fields=fields, boundary=boundary)
|
366 |
+
headers = {"Content-Type": m.content_type}
|
367 |
+
url = f"{base}/voice/hubert-vits"
|
368 |
+
|
369 |
+
res = requests.post(url=url, data=m, headers=headers)
|
370 |
+
fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
|
371 |
+
path = f"{abs_path}/{fname}"
|
372 |
+
|
373 |
+
with open(path, "wb") as f:
|
374 |
+
f.write(res.content)
|
375 |
+
print(path)
|
376 |
+
return path
|
377 |
+
|
378 |
+
|
379 |
+
# 维度情感模型 w2v2-vits
|
380 |
+
def voice_w2v2_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, emotion=0):
|
381 |
+
fields = {
|
382 |
+
"text": text,
|
383 |
+
"id": str(id),
|
384 |
+
"format": format,
|
385 |
+
"lang": lang,
|
386 |
+
"length": str(length),
|
387 |
+
"noise": str(noise),
|
388 |
+
"noisew": str(noisew),
|
389 |
+
"max": str(max),
|
390 |
+
"emotion": str(emotion)
|
391 |
+
}
|
392 |
+
boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
|
393 |
+
|
394 |
+
m = MultipartEncoder(fields=fields, boundary=boundary)
|
395 |
+
headers = {"Content-Type": m.content_type}
|
396 |
+
url = f"{base}/voice/w2v2-vits"
|
397 |
+
|
398 |
+
res = requests.post(url=url, data=m, headers=headers)
|
399 |
+
fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
|
400 |
+
path = f"{abs_path}/{fname}"
|
401 |
+
|
402 |
+
with open(path, "wb") as f:
|
403 |
+
f.write(res.content)
|
404 |
+
print(path)
|
405 |
+
return path
|
406 |
+
|
407 |
+
|
408 |
+
# 语音转换 同VITS模型内角色之间的音色转换
|
409 |
+
def voice_conversion(upload_path, original_id, target_id):
|
410 |
+
upload_name = os.path.basename(upload_path)
|
411 |
+
upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
|
412 |
+
|
413 |
+
with open(upload_path, 'rb') as upload_file:
|
414 |
+
fields = {
|
415 |
+
"upload": (upload_name, upload_file, upload_type),
|
416 |
+
"original_id": str(original_id),
|
417 |
+
"target_id": str(target_id),
|
418 |
+
}
|
419 |
+
boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
|
420 |
+
m = MultipartEncoder(fields=fields, boundary=boundary)
|
421 |
+
|
422 |
+
headers = {"Content-Type": m.content_type}
|
423 |
+
url = f"{base}/voice/conversion"
|
424 |
+
|
425 |
+
res = requests.post(url=url, data=m, headers=headers)
|
426 |
+
|
427 |
+
fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
|
428 |
+
path = f"{abs_path}/{fname}"
|
429 |
+
|
430 |
+
with open(path, "wb") as f:
|
431 |
+
f.write(res.content)
|
432 |
+
print(path)
|
433 |
+
return path
|
434 |
+
|
435 |
+
|
436 |
+
def voice_ssml(ssml):
|
437 |
+
fields = {
|
438 |
+
"ssml": ssml,
|
439 |
+
}
|
440 |
+
boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
|
441 |
+
|
442 |
+
m = MultipartEncoder(fields=fields, boundary=boundary)
|
443 |
+
headers = {"Content-Type": m.content_type}
|
444 |
+
url = f"{base}/voice/ssml"
|
445 |
+
|
446 |
+
res = requests.post(url=url, data=m, headers=headers)
|
447 |
+
fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
|
448 |
+
path = f"{abs_path}/{fname}"
|
449 |
+
|
450 |
+
with open(path, "wb") as f:
|
451 |
+
f.write(res.content)
|
452 |
+
print(path)
|
453 |
+
return path
|
454 |
+
|
455 |
+
def voice_dimensional_emotion(upload_path):
|
456 |
+
upload_name = os.path.basename(upload_path)
|
457 |
+
upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
|
458 |
+
|
459 |
+
with open(upload_path, 'rb') as upload_file:
|
460 |
+
fields = {
|
461 |
+
"upload": (upload_name, upload_file, upload_type),
|
462 |
+
}
|
463 |
+
boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
|
464 |
+
|
465 |
+
m = MultipartEncoder(fields=fields, boundary=boundary)
|
466 |
+
headers = {"Content-Type": m.content_type}
|
467 |
+
url = f"{base}/voice/dimension-emotion"
|
468 |
+
|
469 |
+
res = requests.post(url=url, data=m, headers=headers)
|
470 |
+
fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
|
471 |
+
path = f"{abs_path}/{fname}"
|
472 |
+
|
473 |
+
with open(path, "wb") as f:
|
474 |
+
f.write(res.content)
|
475 |
+
print(path)
|
476 |
+
return path
|
477 |
+
```

## API KEY

Set `API_KEY_ENABLED = True` in config.py to enable API key authentication, and set the key itself via `API_KEY = "api-key"`.

Once enabled, GET requests must include an additional api_key parameter, and POST requests must add an `X-API-KEY` parameter to the header.

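For illustration, a minimal sketch of both styles against the `/voice/vits` endpoint; the server address and key are placeholders that must match your config.py:

```python
import requests

base = "http://127.0.0.1:23456"  # placeholder server address
api_key = "api-key"              # placeholder; must equal API_KEY in config.py

# GET: the key travels as an extra query parameter
res = requests.get(f"{base}/voice/vits",
                   params={"text": "你好", "id": 0, "api_key": api_key})

# POST: the key travels in the X-API-KEY header
res = requests.post(f"{base}/voice/vits",
                    data={"text": "你好", "id": 0},
                    headers={"X-API-KEY": api_key})
```
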
# Parameter

## VITS speech synthesis

| Name | Parameter | Is must | Default | Type | Instruction |
| ---- | --------- | ------- | ------- | ---- | ----------- |
| Synthesized text | text | true | | str | |
| Speaker ID | id | false | 0 | int | |
| Audio format | format | false | wav | str | wav, ogg, silk |
| Text language | lang | false | auto | str | auto is the automatic language detection mode and the default. With lang=mix, the text should be wrapped in [ZH] or [JA]. Dialects cannot be detected automatically. |
| Audio length / speed | length | false | 1.0 | float | Adjusts the audio length, equivalent to adjusting the speaking speed; the larger the value, the slower the speech |
| Noise | noise | false | 0.667 | float | |
| Noise deviation | noisew | false | 0.8 | float | |
| Segmentation threshold | max | false | 50 | int | Splits the text at punctuation marks, merging pieces into one segment once their combined length exceeds max. max<=0 means no segmentation. |

## VITS voice conversion

| Name | Parameter | Is must | Default | Type | Instruction |
| ---- | --------- | ------- | ------- | ---- | ----------- |
| Upload audio | upload | true | | file | wav or ogg |
| Source speaker ID | original_id | true | | int | Speaker ID of the voice in the uploaded file |
| Target speaker ID | target_id | true | | int | Target speaker ID to convert to |

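These fields map one-to-one onto the `voice_conversion` helper defined above; a hypothetical call (the file path and speaker IDs are placeholders):

```python
# Convert upload/test.wav, originally spoken by speaker 91, into speaker 93's voice.
path = voice_conversion("upload/test.wav", 91, 93)
```
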
## HuBert-VITS voice conversion

| Name | Parameter | Is must | Default | Type | Instruction |
| ---- | --------- | ------- | ------- | ---- | ----------- |
| Upload audio | upload | true | | file | |
| Target speaker ID | id | true | | int | |
| Audio format | format | true | | str | wav, ogg, silk |
| Audio length / speed | length | true | | float | Adjusts the audio length, equivalent to adjusting the speaking speed; the larger the value, the slower the speech |
| Noise | noise | true | | float | |
| Noise deviation | noisew | true | | float | |

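A minimal sketch of calling this endpoint directly, mirroring the multipart style of the helpers above; the server address, input file, and field values are placeholders:

```python
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder

with open("upload/test.wav", "rb") as f:  # placeholder reference recording
    m = MultipartEncoder(fields={
        "upload": ("test.wav", f, "audio/wav"),
        "id": "0",
        "format": "wav",
        "length": "1",
        "noise": "0.667",
        "noisew": "0.8",
    })
    res = requests.post("http://127.0.0.1:23456/voice/hubert-vits",
                        data=m, headers={"Content-Type": m.content_type})
```
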
## Dimensional emotion

| Name | Parameter | Is must | Default | Type | Instruction |
| ---- | --------- | ------- | ------- | ---- | ----------- |
| Upload audio | upload | true | | file | Returns an npy file storing the dimensional emotion vector |

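The `voice_dimensional_emotion` helper above wraps this endpoint; a hypothetical call (the path is a placeholder):

```python
# Upload a reference recording; the server returns an .npy emotion vector,
# which W2V2-VITS synthesis can then reference.
npy_path = voice_dimensional_emotion("upload/reference.wav")
```
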
## W2V2-VITS

| Name | Parameter | Is must | Default | Type | Instruction |
| ---- | --------- | ------- | ------- | ---- | ----------- |
| Synthesized text | text | true | | str | |
| Speaker ID | id | false | 0 | int | |
| Audio format | format | false | wav | str | wav, ogg, silk |
| Text language | lang | false | auto | str | auto is the automatic language detection mode and the default. With lang=mix, the text should be wrapped in [ZH] or [JA]. Dialects cannot be detected automatically. |
| Audio length / speed | length | false | 1.0 | float | Adjusts the audio length, equivalent to adjusting the speaking speed; the larger the value, the slower the speech |
| Noise | noise | false | 0.667 | float | |
| Noise deviation | noisew | false | 0.8 | float | |
| Segmentation threshold | max | false | 50 | int | Splits the text at punctuation marks, merging pieces into one segment once their combined length exceeds max. max<=0 means no segmentation. |
| Dimensional emotion | emotion | false | 0 | int | The range depends on the npy emotion reference file; e.g. for [innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)'s all_emotions.npy the range is 0-5457 |

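As a usage sketch, these parameters correspond to the `voice_w2v2_vits` helper defined earlier; the speaker ID and emotion index below are placeholders whose valid ranges depend on your model and npy reference file:

```python
# Synthesize with speaker 3 and emotion embedding index 123 (placeholder values).
path = voice_w2v2_vits("这几天心里颇不宁静。", id=3, emotion=123)
```
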
## SSML speech synthesis markup language

Currently supported elements and attributes:

`speak` element

| Attribute | Description | Is must |
| --------- | ----------- | ------- |
| id | The default value is read from `config.py` | false |
| lang | The default value is read from `config.py` | false |
| length | The default value is read from `config.py` | false |
| noise | The default value is read from `config.py` | false |
| noisew | The default value is read from `config.py` | false |
| max | Splits the text at punctuation marks, merging pieces into one segment once their combined length exceeds max. max<=0 means no segmentation; the default here is 0. | false |
| model | Defaults to vits; `w2v2-vits` and `emotion-vits` are also available | false |
| emotion | `emotion` only takes effect with `w2v2-vits` or `emotion-vits`; its range depends on the npy emotion reference file | false |

`voice` element

Its attributes take precedence over those on `speak`.

| Attribute | Description | Is must |
| --------- | ----------- | ------- |
| id | The default value is read from `config.py` | false |
| lang | The default value is read from `config.py` | false |
| length | The default value is read from `config.py` | false |
| noise | The default value is read from `config.py` | false |
| noisew | The default value is read from `config.py` | false |
| max | Splits the text at punctuation marks, merging pieces into one segment once their combined length exceeds max. max<=0 means no segmentation; the default here is 0. | false |
| model | Defaults to vits; `w2v2-vits` and `emotion-vits` are also available | false |
| emotion | `emotion` only takes effect with `w2v2-vits` or `emotion-vits` | false |

`break` element

| Attribute | Description | Is must |
| --------- | ----------- | ------- |
| strength | x-weak, weak, medium (default), strong, x-strong | false |
| time | The absolute duration of the pause, in seconds (e.g. `2s`) or milliseconds (e.g. `500ms`). Valid values range from 0 to 5000 ms. If a larger value is set, the service uses `5000ms`. When the `time` attribute is set, the `strength` attribute is ignored. | false |

| Strength | Relative Duration |
| :------- | :---------------- |
| x-weak | 250 ms |
| weak | 500 ms |
| medium | 750 ms |
| strong | 1000 ms |
| x-strong | 1250 ms |
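
The `model` and `emotion` attributes are not exercised by the full example below; a minimal, unverified sketch based on the attribute tables above (speaker IDs and the emotion index are placeholders):

```xml
<speak lang="zh" format="wav">
    <voice id="0" model="w2v2-vits" emotion="123">今晚的月色真美。</voice>
    <voice id="1" model="vits">今晚的月色真美。</voice>
</speak>
```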

Example

```xml
<speak lang="zh" format="mp3" length="1.2">
    <voice id="92">这几天心里颇不宁静。</voice>
    <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
    <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
    <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
    <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
    <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
    <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
    <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
    <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
    <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
    <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
    <voice id="127">路上只我一个人,背着手踱着。</voice>
    <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
    <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
    <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
    <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
    <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
</speak>
```

# Communication

Currently there is only a [QQ group](https://qm.qq.com/cgi-bin/qm/qr?k=-1GknIe4uXrkmbDKBGKa1aAUteq40qs_&jump_from=webapi&authKey=x5YYt6Dggs1ZqWxvZqvj3fV8VUnxRyXm5S5Kzntc78+Nv3iXOIawplGip9LWuNR/)

# Acknowledgements

- vits: https://github.com/jaywalnut310/vits
- MoeGoe: https://github.com/CjangCjengh/MoeGoe
- emotional-vits: https://github.com/innnky/emotional-vits
- vits-uma-genshin-honkai: https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai

app.py
ADDED
@@ -0,0 +1,384 @@
import os
import logging
import time
import logzero
import uuid
from flask import Flask, request, send_file, jsonify, make_response
from werkzeug.utils import secure_filename
from flask_apscheduler import APScheduler
from functools import wraps
from utils.utils import clean_folder, check_is_none
from utils.merge import merge_model
from io import BytesIO

app = Flask(__name__)
app.config.from_pyfile("config.py")

scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()

logzero.loglevel(logging.WARNING)
logger = logging.getLogger("vits-simple-api")
level = app.config.get("LOGGING_LEVEL", "DEBUG")
level_dict = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR,
              'CRITICAL': logging.CRITICAL}
logging.basicConfig(level=level_dict[level])
logging.getLogger('numba').setLevel(logging.WARNING)

tts = merge_model(app.config["MODEL_LIST"])

if not os.path.exists(app.config['UPLOAD_FOLDER']):
    os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

if not os.path.exists(app.config['CACHE_PATH']):
    os.makedirs(app.config['CACHE_PATH'], exist_ok=True)


def require_api_key(func):
    @wraps(func)
    def check_api_key(*args, **kwargs):
        if not app.config.get('API_KEY_ENABLED', False):
            return func(*args, **kwargs)
        else:
            api_key = request.args.get('api_key') or request.headers.get('X-API-KEY')
            if api_key and api_key == app.config['API_KEY']:
                return func(*args, **kwargs)
            else:
                return make_response(jsonify({"status": "error", "message": "Invalid API Key"}), 401)

    return check_api_key


@app.route('/', methods=["GET", "POST"])
def index():
    return "vits-simple-api"


@app.route('/voice/speakers', methods=["GET", "POST"])
def voice_speakers_api():
    return jsonify(tts.voice_speakers)


@app.route('/voice', methods=["GET", "POST"])
@app.route('/voice/vits', methods=["GET", "POST"])
@require_api_key
def voice_vits_api():
    try:
        if request.method == "GET":
            text = request.args.get("text", "")
            id = int(request.args.get("id", app.config.get("ID", 0)))
            format = request.args.get("format", app.config.get("FORMAT", "wav"))
            lang = request.args.get("lang", app.config.get("LANG", "auto"))
            length = float(request.args.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.args.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.args.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(request.args.get("max", app.config.get("MAX", 50)))
        elif request.method == "POST":
            text = request.form.get("text", "")
            id = int(request.form.get("id", app.config.get("ID", 0)))
            format = request.form.get("format", app.config.get("FORMAT", "wav"))
            lang = request.form.get("lang", app.config.get("LANG", "auto"))
            length = float(request.form.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.form.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.form.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(request.form.get("max", app.config.get("MAX", 50)))
    except Exception as e:
        logger.error(f"[VITS] {e}")
        return make_response("parameter error", 400)

    logger.info(f"[VITS] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew}")
    logger.info(f"[VITS] len:{len(text)} text:{text}")

    if check_is_none(text):
        logger.info(f"[VITS] text is empty")
        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)

    if check_is_none(id):
        logger.info(f"[VITS] speaker id is empty")
        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

    if id < 0 or id >= tts.vits_speakers_count:
        logger.info(f"[VITS] speaker id {id} does not exist")
        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)

    speaker_lang = tts.voice_speakers["VITS"][id].get('lang')
    if lang.upper() != "AUTO" and lang.upper() != "MIX" and len(speaker_lang) != 1 and lang not in speaker_lang:
        logger.info(f"[VITS] lang \"{lang}\" is not in {speaker_lang}")
        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)

    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")

    fname = f"{str(uuid.uuid1())}.{format}"
    file_type = f"audio/{format}"

    t1 = time.time()
    output = tts.vits_infer({"text": text,
                             "id": id,
                             "format": format,
                             "length": length,
                             "noise": noise,
                             "noisew": noisew,
                             "max": max,
                             "lang": lang,
                             "speaker_lang": speaker_lang})
    t2 = time.time()
    logger.info(f"[VITS] finish in {(t2 - t1):.2f}s")

    return send_file(path_or_file=output, mimetype=file_type, download_name=fname)


@app.route('/voice/hubert-vits', methods=["POST"])
@require_api_key
def voice_hubert_api():
    if request.method == "POST":
        try:
            voice = request.files['upload']
            id = int(request.form.get("id"))
            format = request.form.get("format", app.config.get("FORMAT", "wav"))
            length = float(request.form.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.form.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.form.get("noisew", app.config.get("NOISEW", 0.8)))
        except Exception as e:
            logger.error(f"[hubert] {e}")
            return make_response("parameter error", 400)

        logger.info(f"[hubert] id:{id} format:{format} length:{length} noise:{noise} noisew:{noisew}")

        fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
        voice.save(os.path.join(app.config['UPLOAD_FOLDER'], fname))

        if check_is_none(id):
            logger.info(f"[hubert] speaker id is empty")
            return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

        if id < 0 or id >= tts.hubert_speakers_count:
            logger.info(f"[hubert] speaker id {id} does not exist")
            return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)

        file_type = f"audio/{format}"

        t1 = time.time()
        output = tts.hubert_vits_infer({"id": id,
                                        "format": format,
                                        "length": length,
                                        "noise": noise,
                                        "noisew": noisew,
                                        "audio_path": os.path.join(app.config['UPLOAD_FOLDER'], fname)})
        t2 = time.time()
        logger.info(f"[hubert] finish in {(t2 - t1):.2f}s")

        return send_file(path_or_file=output, mimetype=file_type, download_name=fname)


@app.route('/voice/w2v2-vits', methods=["GET", "POST"])
@require_api_key
def voice_w2v2_api():
    try:
        if request.method == "GET":
            text = request.args.get("text", "")
            id = int(request.args.get("id", app.config.get("ID", 0)))
            format = request.args.get("format", app.config.get("FORMAT", "wav"))
            lang = request.args.get("lang", app.config.get("LANG", "auto"))
            length = float(request.args.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.args.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.args.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(request.args.get("max", app.config.get("MAX", 50)))
            emotion = int(request.args.get("emotion", app.config.get("EMOTION", 0)))
        elif request.method == "POST":
            text = request.form.get("text", "")
            id = int(request.form.get("id", app.config.get("ID", 0)))
            format = request.form.get("format", app.config.get("FORMAT", "wav"))
            lang = request.form.get("lang", app.config.get("LANG", "auto"))
            length = float(request.form.get("length", app.config.get("LENGTH", 1)))
            noise = float(request.form.get("noise", app.config.get("NOISE", 0.667)))
            noisew = float(request.form.get("noisew", app.config.get("NOISEW", 0.8)))
            max = int(request.form.get("max", app.config.get("MAX", 50)))
            emotion = int(request.form.get("emotion", app.config.get("EMOTION", 0)))
    except Exception as e:
        logger.error(f"[w2v2] {e}")
        return make_response("parameter error", 400)

    logger.info(f"[w2v2] id:{id} format:{format} lang:{lang} "
                f"length:{length} noise:{noise} noisew:{noisew} emotion:{emotion}")
    logger.info(f"[w2v2] len:{len(text)} text:{text}")

    if check_is_none(text):
        logger.info(f"[w2v2] text is empty")
        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)

    if check_is_none(id):
        logger.info(f"[w2v2] speaker id is empty")
        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

    if id < 0 or id >= tts.w2v2_speakers_count:
        logger.info(f"[w2v2] speaker id {id} does not exist")
        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)

    speaker_lang = tts.voice_speakers["W2V2-VITS"][id].get('lang')
    if lang.upper() != "AUTO" and lang.upper() != "MIX" and len(speaker_lang) != 1 and lang not in speaker_lang:
        logger.info(f"[w2v2] lang \"{lang}\" is not in {speaker_lang}")
        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)

    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")

    fname = f"{str(uuid.uuid1())}.{format}"
    file_type = f"audio/{format}"

    t1 = time.time()
    output = tts.w2v2_vits_infer({"text": text,
                                  "id": id,
                                  "format": format,
                                  "length": length,
                                  "noise": noise,
                                  "noisew": noisew,
                                  "max": max,
                                  "lang": lang,
                                  "emotion": emotion,
                                  "speaker_lang": speaker_lang})
    t2 = time.time()
    logger.info(f"[w2v2] finish in {(t2 - t1):.2f}s")

    return send_file(path_or_file=output, mimetype=file_type, download_name=fname)


@app.route('/voice/conversion', methods=["POST"])
@app.route('/voice/vits/conversion', methods=["POST"])
@require_api_key
def vits_voice_conversion_api():
    if request.method == "POST":
        try:
            voice = request.files['upload']
            original_id = int(request.form["original_id"])
            target_id = int(request.form["target_id"])
            format = request.form.get("format", voice.filename.split(".")[1])
        except Exception as e:
            logger.error(f"[vits_voice_conversion] {e}")
            return make_response("parameter error", 400)

        fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
        audio_path = os.path.join(app.config['UPLOAD_FOLDER'], fname)
        voice.save(audio_path)
        file_type = f"audio/{format}"

        logger.info(f"[vits_voice_conversion] original_id:{original_id} target_id:{target_id}")
        t1 = time.time()
        try:
            output = tts.vits_voice_conversion({"audio_path": audio_path,
                                                "original_id": original_id,
                                                "target_id": target_id,
                                                "format": format})
        except Exception as e:
            logger.info(f"[vits_voice_conversion] {e}")
            return make_response(jsonify({"status": "error", "message": "synthesis failure"}), 400)
        t2 = time.time()
        logger.info(f"[vits_voice_conversion] finish in {(t2 - t1):.2f}s")

        return send_file(path_or_file=output, mimetype=file_type, download_name=fname)


@app.route('/voice/ssml', methods=["POST"])
@require_api_key
def ssml():
    try:
        ssml = request.form["ssml"]
    except Exception as e:
        logger.info(f"[ssml] {e}")
        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)

    logger.debug(ssml)

    t1 = time.time()
    try:
        output, format = tts.create_ssml_infer_task(ssml)
    except Exception as e:
        logger.info(f"[ssml] {e}")
        return make_response(jsonify({"status": "error", "message": "synthesis failure"}), 400)
    t2 = time.time()

    fname = f"{str(uuid.uuid1())}.{format}"
    file_type = f"audio/{format}"

    logger.info(f"[ssml] finish in {(t2 - t1):.2f}s")

    return send_file(path_or_file=output, mimetype=file_type, download_name=fname)


@app.route('/voice/dimension-emotion', methods=["POST"])
def dimensional_emotion():
    if request.method == "POST":
        try:
            audio = request.files['upload']
        except Exception as e:
            logger.error(f"[dimensional_emotion] {e}")
            return make_response("parameter error", 400)

        content = BytesIO(audio.read())

        file_type = "application/octet-stream; charset=ascii"
        fname = os.path.splitext(audio.filename)[0] + ".npy"
        output = tts.get_dimensional_emotion_npy(content)

        return send_file(path_or_file=output, mimetype=file_type, download_name=fname)


@app.route('/voice/check', methods=["GET", "POST"])
def check():
    try:
        if request.method == "GET":
            model = request.args.get("model")
            id = int(request.args.get("id"))
        elif request.method == "POST":
            model = request.form["model"]
            id = int(request.form["id"])
    except Exception as e:
        logger.info(f"[check] {e}")
        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)

    if check_is_none(model):
        logger.info(f"[check] model {model} is empty")
        return make_response(jsonify({"status": "error", "message": "model is empty"}), 400)

    if model.upper() not in ("VITS", "HUBERT", "W2V2"):
        res = make_response(jsonify({"status": "error", "message": f"model {model} does not exist"}))
        res.status_code = 404
        logger.info(f"[check] model {model} does not exist")
        return res

    if check_is_none(id):
        logger.info(f"[check] speaker id is empty")
        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)

    if model.upper() == "VITS":
        speaker_list = tts.voice_speakers["VITS"]
    elif model.upper() == "HUBERT":
        speaker_list = tts.voice_speakers["HUBERT-VITS"]
    elif model.upper() == "W2V2":
        speaker_list = tts.voice_speakers["W2V2-VITS"]

    if len(speaker_list) == 0:
        logger.info(f"[check] {model} not loaded")
        return make_response(jsonify({"status": "error", "message": f"{model} not loaded"}), 400)

    if id < 0 or id >= len(speaker_list):
        logger.info(f"[check] speaker id {id} does not exist")
        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
    name = str(speaker_list[id]["name"])
    lang = speaker_list[id]["lang"]
    logger.info(f"[check] check id:{id} name:{name} lang:{lang}")

    return make_response(jsonify({"status": "success", "id": id, "name": name, "lang": lang}), 200)


# regular cleaning
@scheduler.task('interval', id='clean_task', seconds=3600, misfire_grace_time=900)
def clean_task():
    clean_folder(app.config["UPLOAD_FOLDER"])
    clean_folder(app.config["CACHE_PATH"])


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=app.config.get("PORT", 23456), debug=app.config.get("DEBUG", False))  # listen on all interfaces
    # app.run(host='127.0.0.1', port=app.config.get("PORT", 23456), debug=True)  # run locally for debugging
attentions.py
ADDED
@@ -0,0 +1,300 @@
import math
import torch
from torch import nn
from torch.nn import functional as F

import commons
from modules import LayerNorm


class Encoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels ** -0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
chinese_dialect_lexicons/changzhou.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Changzhou dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "changzhou.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "changzhou.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/changzhou.ocd2
ADDED
Binary file (96.1 kB)
chinese_dialect_lexicons/changzhou_3.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Changzhou dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "changzhou.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "changzhou.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/changzhou_3.ocd2
ADDED
Binary file (96.1 kB)
chinese_dialect_lexicons/cixi_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Cixi dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "cixi.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "cixi.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/cixi_2.ocd2
ADDED
Binary file (98 kB)
chinese_dialect_lexicons/fuyang_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Fuyang dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "fuyang.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "fuyang.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/fuyang_2.ocd2
ADDED
Binary file (83.7 kB)
chinese_dialect_lexicons/hangzhou_2.json
ADDED
@@ -0,0 +1,19 @@
{
  "name": "Hangzhounese to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "hangzhou.ocd2"
    }
  },
  "conversion_chain": [{
    "dict": {
      "type": "group",
      "dicts": [{
        "type": "ocd2",
        "file": "hangzhou.ocd2"
      }]
    }
  }]
}
chinese_dialect_lexicons/hangzhou_2.ocd2
ADDED
Binary file (427 kB)
chinese_dialect_lexicons/jiading_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Jiading dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "jiading.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "jiading.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/jiading_2.ocd2
ADDED
Binary file (111 kB)
chinese_dialect_lexicons/jiashan_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Jiashan dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "jiashan.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "jiashan.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/jiashan_2.ocd2
ADDED
Binary file (71.7 kB)
chinese_dialect_lexicons/jingjiang_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Jingjiang dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "jingjiang.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "jingjiang.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/jingjiang_2.ocd2
ADDED
Binary file (86.1 kB)
chinese_dialect_lexicons/jyutjyu_2.json
ADDED
@@ -0,0 +1,19 @@
{
  "name": "Cantonese to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "jyutjyu_2.ocd2"
    }
  },
  "conversion_chain": [{
    "dict": {
      "type": "group",
      "dicts": [{
        "type": "ocd2",
        "file": "jyutjyu_2.ocd2"
      }]
    }
  }]
}
chinese_dialect_lexicons/jyutjyu_2.ocd2
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aea11bfe51b184b3f000d20ab49757979b216219203839d2b2e3c1f990a13fa5
size 2432991
chinese_dialect_lexicons/linping_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Linping dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "linping.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "linping.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/linping_2.ocd2
ADDED
Binary file (65.4 kB)
chinese_dialect_lexicons/ningbo_2.json
ADDED
@@ -0,0 +1,19 @@
{
  "name": "Ningbonese to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "ningbo.ocd2"
    }
  },
  "conversion_chain": [{
    "dict": {
      "type": "group",
      "dicts": [{
        "type": "ocd2",
        "file": "ningbo.ocd2"
      }]
    }
  }]
}
chinese_dialect_lexicons/ningbo_2.ocd2
ADDED
Binary file (386 kB)
chinese_dialect_lexicons/pinghu_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Pinghu dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "pinghu.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "pinghu.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/pinghu_2.ocd2
ADDED
Binary file (69.4 kB)
chinese_dialect_lexicons/ruao_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Ruao dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "ruao.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "ruao.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/ruao_2.ocd2
ADDED
Binary file (58.8 kB)
chinese_dialect_lexicons/sanmen_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Sanmen dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "sanmen.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "sanmen.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/sanmen_2.ocd2
ADDED
Binary file (80.2 kB)
chinese_dialect_lexicons/shaoxing_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Shaoxing dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "shaoxing.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "shaoxing.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/shaoxing_2.ocd2
ADDED
Binary file (113 kB)
chinese_dialect_lexicons/suichang_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Suichang dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "suichang.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "suichang.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/suichang_2.ocd2
ADDED
Binary file (81 kB)
chinese_dialect_lexicons/suzhou_2.json
ADDED
@@ -0,0 +1,19 @@
{
  "name": "Suzhounese to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "suzhou.ocd2"
    }
  },
  "conversion_chain": [{
    "dict": {
      "type": "group",
      "dicts": [{
        "type": "ocd2",
        "file": "suzhou.ocd2"
      }]
    }
  }]
}
chinese_dialect_lexicons/suzhou_2.ocd2
ADDED
Binary file (506 kB)
chinese_dialect_lexicons/tiantai_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Tiantai dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "tiantai.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "tiantai.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/tiantai_2.ocd2
ADDED
Binary file (121 kB)
chinese_dialect_lexicons/tongxiang_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Tongxiang dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "tongxiang.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "tongxiang.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/tongxiang_2.ocd2
ADDED
Binary file (137 kB)
chinese_dialect_lexicons/wenzhou_2.json
ADDED
@@ -0,0 +1,23 @@
{
  "name": "Wenzhou dialect to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "wenzhou.ocd2"
    }
  },
  "conversion_chain": [
    {
      "dict": {
        "type": "group",
        "dicts": [
          {
            "type": "ocd2",
            "file": "wenzhou.ocd2"
          }
        ]
      }
    }
  ]
}
chinese_dialect_lexicons/wenzhou_2.ocd2
ADDED
Binary file (83.1 kB)
chinese_dialect_lexicons/wuxi_2.json
ADDED
@@ -0,0 +1,19 @@
{
  "name": "Wuxinese to IPA",
  "segmentation": {
    "type": "mmseg",
    "dict": {
      "type": "ocd2",
      "file": "wuxi.ocd2"
    }
  },
  "conversion_chain": [{
    "dict": {
      "type": "group",
      "dicts": [{
        "type": "ocd2",
        "file": "wuxi.ocd2"
      }]
    }
  }]
}
chinese_dialect_lexicons/wuxi_2.ocd2
ADDED
Binary file (359 kB)