drewThomasson committed
Commit 2b53eee • 1 parent: 4ca4986

Upload 12 files

Files changed:
- README.md +9 -8
- apple_silicon_requirements.txt +189 -0
- notebook/xtts_finetune_webui.ipynb +154 -0
- requirements.txt +2 -6
- xtts_demo.py +697 -0
README.md
CHANGED

```diff
@@ -1,10 +1,3 @@
----
-license: mit
-title: Other guys fine tune xtts web ui idk
-emoji: 🐢
-short_description: Other guys fine tune xtts web ui idk
-sdk: gradio
----
 # xtts-finetune-webui
 
 This webui is a slightly modified copy of the [official webui](https://github.com/coqui-ai/TTS/pull/3296) for finetuning XTTS.
@@ -58,6 +51,9 @@ If you are looking for an option for normal XTTS use look here [https://github.com/daswer123/xtts-webui](https://github.com/daswer123/xtts-webui)
 
 ![image](https://github.com/daswer123/xtts-finetune-webui/assets/22278673/aa05bcd4-8642-4de4-8f2f-bc0f5571af63)
 
+## Google colab
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DrewThomasson/xtts-finetune-webui/blob/main/notebook/xtts_finetune_webui.ipynb)
+
 ## Install
 
 1. Make sure you have `Cuda` installed
@@ -77,4 +73,9 @@ If you are looking for an option for normal XTTS use look here [https://github.com/daswer123/xtts-webui](https://github.com/daswer123/xtts-webui)
 1. Run `bash install.sh`
 2. To start the server, run `start.sh`
 3. Go to the local address `127.0.0.1:5003`
-
+
+### On Apple Silicon Mac (python 3.10 env)
+1. Run `pip install --no-deps -r apple_silicon_requirements.txt`
+2. To start the server, run `python xtts_demo.py`
+3. Go to the local address `127.0.0.1:5003`
+
```
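The Apple Silicon path above installs with `--no-deps` into an existing Python 3.10 environment and runs without CUDA; `xtts_demo.py` only checks `torch.cuda.is_available()` and otherwise falls back to CPU code paths. A minimal sketch, assuming the torch 2.3.1 pinned in `apple_silicon_requirements.txt`, to confirm what the install sees:

```python
import torch

# Expected on an Apple Silicon Mac with the pinned torch 2.3.1:
print(torch.cuda.is_available())          # False: the demo will use CPU code paths
print(torch.backends.mps.is_built())      # True if this torch build includes Metal support
print(torch.backends.mps.is_available())  # True if the Metal device is usable on this machine
```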
apple_silicon_requirements.txt
ADDED (new file, 189 lines)

```text
absl-py==2.1.0
aiofiles==23.2.1
aiohttp==3.9.5
aiosignal==1.3.1
altair==5.3.0
annotated-types==0.7.0
anyascii==0.3.2
anyio==3.7.1
async-timeout==4.0.3
attrs==23.2.0
audioread==3.0.1
av==12.2.0
Babel==2.15.0
bangla==0.0.2
blinker==1.8.2
blis==0.7.11
bnnumerizer==0.0.2
bnunicodenormalizer==0.1.7
catalogue==2.0.10
certifi==2024.7.4
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
cloudpathlib==0.16.0
colorama==0.4.6
coloredlogs==15.0.1
confection==0.1.5
contourpy==1.2.1
coqpit==0.0.17
coqui-tts==0.24.2
coqui-tts-trainer==0.1.4
ctranslate2==4.3.1
cutlet==0.4.0
cycler==0.12.1
cymem==2.0.8
Cython==3.0.10
dateparser==1.1.8
decorator==5.1.1
dnspython==2.6.1
docopt==0.6.2
einops==0.8.0
email_validator==2.2.0
encodec==0.1.1
exceptiongroup==1.2.2
fastapi==0.103.1
fastapi-cli==0.0.4
faster-whisper==1.0.2
ffmpy==0.3.2
filelock==3.15.4
Flask==3.0.3
flatbuffers==24.3.25
fonttools==4.53.1
frozenlist==1.4.1
fsspec==2024.6.1
fugashi==1.3.2
g2pkk==0.1.2
gradio==4.44.1
gradio_client==1.3.0
grpcio==1.64.1
gruut==2.4.0
gruut-ipa==0.13.0
gruut_lang_de==2.0.1
gruut_lang_en==2.0.1
gruut_lang_es==2.0.1
gruut_lang_fr==2.0.2
h11==0.14.0
hangul-romanize==0.1.0
httpcore==1.0.5
httptools==0.6.1
httpx==0.27.0
huggingface-hub==0.23.5
humanfriendly==10.0
idna==3.7
importlib_resources==6.4.0
inflect==7.3.1
itsdangerous==2.2.0
jaconv==0.4.0
jamo==0.4.1
jieba==0.42.1
Jinja2==3.1.4
joblib==1.4.2
jsonlines==1.2.0
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
kiwisolver==1.4.5
langcodes==3.4.0
language_data==1.2.0
lazy_loader==0.4
librosa==0.10.2.post1
llvmlite==0.43.0
marisa-trie==1.2.0
Markdown==3.6
markdown-it-py==3.0.0
MarkupSafe==2.1.5
matplotlib==3.8.4
mdurl==0.1.2
mecab-python3==1.0.9
mojimoji==0.0.13
more-itertools==10.3.0
mpmath==1.3.0
msgpack==1.0.8
multidict==6.0.5
murmurhash==1.0.10
networkx==2.8.8
nltk==3.8.1
num2words==0.5.13
numba==0.60.0
numpy==1.26.4
onnxruntime==1.18.1
orjson==3.10.6
packaging==24.1
pandas==1.5.3
pillow==10.4.0
platformdirs==4.2.2
pooch==1.8.2
preshed==3.0.9
protobuf==4.25.3
psutil==6.0.0
pycparser==2.22
pydantic==2.3.0
pydantic_core==2.6.3
pydub==0.25.1
pygame==2.6.0
Pygments==2.18.0
pynndescent==0.5.13
pyparsing==3.1.2
pypinyin==0.51.0
pysbd==0.3.4
python-crfsuite==0.9.10
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-multipart==0.0.9
pytz==2024.1
PyYAML==6.0.1
referencing==0.35.1
regex==2024.5.15
requests==2.32.3
rich==13.7.1
rpds-py==0.19.0
ruff==0.5.2
safetensors==0.4.3
scikit-learn==1.5.1
scipy==1.11.4
semantic-version==2.10.0
shellingham==1.5.4
six==1.16.0
smart-open==6.4.0
sniffio==1.3.1
soundfile==0.12.1
soxr==0.3.7
spacy==3.7.4
spacy-legacy==3.0.12
spacy-loggers==1.0.5
srsly==2.4.8
starlette==0.27.0
SudachiDict-core==20240409
SudachiPy==0.6.8
sympy==1.13.0
tensorboard==2.17.0
tensorboard-data-server==0.7.2
thinc==8.2.5
threadpoolctl==3.5.0
tokenizers==0.19.1
tomlkit==0.12.0
toolz==0.12.1
torch==2.3.1
torchaudio==2.3.1
tqdm==4.66.4
trainer==0.0.36
transformers==4.42.4
TTS==0.21.3
typeguard==4.3.0
typer==0.12.5
typing_extensions==4.12.2
tzdata==2024.1
tzlocal==5.2
umap-learn==0.5.6
Unidecode==1.3.8
unidic-lite==1.0.8
urllib3==2.2.2
uvicorn==0.30.1
uvloop==0.19.0
wasabi==1.1.3
watchfiles==0.22.0
weasel==0.3.4
websockets==11.0.3
Werkzeug==3.0.3
wrapt==1.16.0
yarl==1.9.4
```
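Because this file is meant for `pip install --no-deps`, pip performs no dependency resolution against these pins; a quick sanity check after installing, assuming only the packages listed above, is to read the installed versions back from the environment:

```python
from importlib.metadata import version

# Spot-check the pins the webui is most sensitive to
for pkg in ("coqui-tts", "gradio", "torch", "torchaudio", "faster-whisper"):
    print(pkg, version(pkg))
```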
notebook/xtts_finetune_webui.ipynb
ADDED (new file, 154 lines)

```json
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4",
      "authorship_tag": "ABX9TyP4Z6m49+bXNW/J1fP7ZIEB",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/DrewThomasson/xtts-finetune-webui/blob/main/notebook/xtts_finetune_webui.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Welcome to the *xtts*-finetune-webui gradio gui!\n",
        "\n",
        "This webui is a slightly modified copy of the official webui for finetuning XTTS.\n",
        "\n",
        "If you are looking for an option for normal XTTS use, look here: https://github.com/daswer123/xtts-webui"
      ],
      "metadata": {
        "id": "OVjEG_yGoC2W"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "44HpAIVRfJve"
      },
      "outputs": [],
      "source": [
        "# @title 🛠️ Install requirements\n",
        "#!DEBIAN_FRONTEND=noninteractive\n",
        "!sudo apt-get update  # && sudo apt-get -y upgrade\n",
        "!sudo apt-get -y install libegl1\n",
        "!sudo apt-get -y install libopengl0\n",
        "!sudo apt-get -y install libxcb-cursor0\n",
        "!pip install -r https://raw.githubusercontent.com/daswer123/xtts-finetune-webui/main/requirements.txt\n",
        "!pip install gradio==4.44.1\n",
        "!pip install fastapi==0.103.1\n",
        "!pip install pydantic==2.3.0"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# @title 🚀 Run interface\n",
        "%cd /content/\n",
        "!git clone https://github.com/DrewThomasson/xtts-finetune-webui.git\n",
        "%cd /content/xtts-finetune-webui\n",
        "!python xtts_demo.py --share"
      ],
      "metadata": {
        "cellView": "form",
        "id": "62Da1Q5AgN3W"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import shutil\n",
        "import requests\n",
        "import os\n",
        "from tqdm import tqdm  # Progress bar library\n",
        "\n",
        "# Define the paths\n",
        "finetune_dir = '/content/xtts-finetune-webui/finetune_models/ready'  # @param {type:\"string\"}\n",
        "dataset_dir = '/content/xtts-finetune-webui/finetune_models/dataset'  # @param {type:\"string\"}\n",
        "\n",
        "# Create a temporary directory to collect both folders before zipping\n",
        "temp_dir = \"/content/temp_finetune_dataset\"\n",
        "os.makedirs(temp_dir, exist_ok=True)\n",
        "\n",
        "# Copy both directories into the temporary directory with a progress bar\n",
        "def copy_with_progress(src, dst):\n",
        "    total_files = sum(len(files) for _, _, files in os.walk(src))\n",
        "    with tqdm(total=total_files, desc=f\"Copying {os.path.basename(src)}\") as pbar:\n",
        "        for root, _, files in os.walk(src):\n",
        "            rel_path = os.path.relpath(root, src)\n",
        "            target_path = os.path.join(dst, rel_path)\n",
        "            os.makedirs(target_path, exist_ok=True)\n",
        "            for file in files:\n",
        "                shutil.copy(os.path.join(root, file), target_path)\n",
        "                pbar.update(1)\n",
        "\n",
        "copy_with_progress(finetune_dir, os.path.join(temp_dir, \"ready\"))\n",
        "copy_with_progress(dataset_dir, os.path.join(temp_dir, \"dataset\"))\n",
        "\n",
        "# Create a zip file of the combined directories with progress\n",
        "zip_filename = \"finetune_and_dataset.zip\"\n",
        "with tqdm(total=100, desc=\"Zipping files\") as pbar:\n",
        "    shutil.make_archive(\"finetune_and_dataset\", 'zip', root_dir=temp_dir)\n",
        "    pbar.update(100)\n",
        "\n",
        "# Define a function to stream the upload with a progress bar\n",
        "def upload_with_progress(file_path, url):\n",
        "    file_size = os.path.getsize(file_path)\n",
        "    with open(file_path, 'rb') as f, tqdm(\n",
        "        total=file_size, unit='B', unit_scale=True, desc=\"Uploading\"\n",
        "    ) as progress:\n",
        "        response = requests.post(\n",
        "            url,\n",
        "            files={\"file\": (file_path, f)},\n",
        "            stream=True,\n",
        "            headers={\"Connection\": \"keep-alive\"},\n",
        "        )\n",
        "        # Note: this advances as the *response* is read back, not as the file is sent\n",
        "        for chunk in response.iter_content(chunk_size=4096):\n",
        "            if chunk:\n",
        "                progress.update(len(chunk))\n",
        "        return response\n",
        "\n",
        "# Upload the zip file to file.io with a progress bar\n",
        "response = upload_with_progress(zip_filename, \"https://file.io/?expires=1d\")\n",
        "\n",
        "# Parse the response and display the download link\n",
        "if response.status_code == 200:\n",
        "    download_link = response.json().get('link', 'Error: No link found.')\n",
        "    print(f\"Your file is ready: {download_link}\")\n",
        "else:\n",
        "    print(f\"Failed to upload: {response.status_code} - {response.text}\")\n"
      ],
      "metadata": {
        "cellView": "form",
        "id": "MYBWgKevr6S3"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}
```
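A caveat in the last cell: `requests.post(files=...)` buffers and sends the whole zip up front, so the tqdm bar labeled "Uploading" actually advances while the response is read back. A sketch of a genuinely streamed upload with accurate progress, assuming the extra `requests-toolbelt` dependency (not used by the notebook itself):

```python
import os
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
from tqdm import tqdm

def upload_streaming(file_path: str, url: str) -> requests.Response:
    # Multipart body that requests reads (and sends) incrementally instead of buffering
    encoder = MultipartEncoder(fields={"file": (os.path.basename(file_path), open(file_path, "rb"))})
    bar = tqdm(total=encoder.len, unit="B", unit_scale=True, desc="Uploading")
    monitor = MultipartEncoderMonitor(encoder, lambda m: bar.update(m.bytes_read - bar.n))
    response = requests.post(url, data=monitor, headers={"Content-Type": monitor.content_type})
    bar.close()
    return response

# response = upload_streaming("finetune_and_dataset.zip", "https://file.io/?expires=1d")
```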
requirements.txt
CHANGED

```diff
@@ -1,11 +1,7 @@
-# Python packages for your Hugging Face Space
 faster_whisper==1.0.2
 gradio==4.13.0
 spacy==3.7.4
-coqui-tts[languages]==0.24.
+coqui-tts[languages] == 0.24.2
+
 cutlet
 fugashi[unidic-lite]
-
-# CUDA-enabled PyTorch and Torchaudio
-torch==2.1.1+cu118
-torchaudio==2.1.1+cu118 --index-url https://download.pytorch.org/whl/cu118
```
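With the CUDA-pinned torch/torchaudio lines removed, the webui now uses whatever torch build the environment already provides (Colab's CUDA build, or the CPU/Metal build from `apple_silicon_requirements.txt`). A quick way to check which build is active, assuming torch imports:

```python
import torch

print(torch.__version__)   # for example "2.3.1" on Apple Silicon or a "+cu118"-suffixed build on CUDA setups
print(torch.version.cuda)  # None on CPU/MPS-only builds
```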
xtts_demo.py
ADDED (new file, 697 lines)

```python
import argparse
import os
import sys
import glob
import shutil
import tempfile
import traceback
from pathlib import Path

import gradio as gr
import librosa.display
import numpy as np

import torch
import torchaudio

from utils.formatter import format_audio_list, find_latest_best_model, list_audios
from utils.gpt_train import train_gpt

from faster_whisper import WhisperModel

from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts

# Clear logs
def remove_log_file(file_path):
    log_file = Path(file_path)

    if log_file.exists() and log_file.is_file():
        log_file.unlink()

# remove_log_file(str(Path.cwd() / "log.out"))

def clear_gpu_cache():
    # clear the GPU cache
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

XTTS_MODEL = None
def load_model(xtts_checkpoint, xtts_config, xtts_vocab, xtts_speaker):
    global XTTS_MODEL
    clear_gpu_cache()
    if not xtts_checkpoint or not xtts_config or not xtts_vocab:
        return "You need to run the previous steps or manually set the `XTTS checkpoint path`, `XTTS config path`, and `XTTS vocab path` fields !!"
    config = XttsConfig()
    config.load_json(xtts_config)
    XTTS_MODEL = Xtts.init_from_config(config)
    print("Loading XTTS model!")
    XTTS_MODEL.load_checkpoint(config, checkpoint_path=xtts_checkpoint, vocab_path=xtts_vocab, speaker_file_path=xtts_speaker, use_deepspeed=False)
    if torch.cuda.is_available():
        XTTS_MODEL.cuda()

    print("Model Loaded!")
    return "Model Loaded!"

def run_tts(lang, tts_text, speaker_audio_file, temperature, length_penalty, repetition_penalty, top_k, top_p, sentence_split, use_config):
    if XTTS_MODEL is None or not speaker_audio_file:
        return "You need to run the previous step to load the model !!", None, None

    gpt_cond_latent, speaker_embedding = XTTS_MODEL.get_conditioning_latents(audio_path=speaker_audio_file, gpt_cond_len=XTTS_MODEL.config.gpt_cond_len, max_ref_length=XTTS_MODEL.config.max_ref_len, sound_norm_refs=XTTS_MODEL.config.sound_norm_refs)

    if use_config:
        out = XTTS_MODEL.inference(
            text=tts_text,
            language=lang,
            gpt_cond_latent=gpt_cond_latent,
            speaker_embedding=speaker_embedding,
            temperature=XTTS_MODEL.config.temperature,  # Add custom parameters here
            length_penalty=XTTS_MODEL.config.length_penalty,
            repetition_penalty=XTTS_MODEL.config.repetition_penalty,
            top_k=XTTS_MODEL.config.top_k,
            top_p=XTTS_MODEL.config.top_p,
            enable_text_splitting=True
        )
    else:
        out = XTTS_MODEL.inference(
            text=tts_text,
            language=lang,
            gpt_cond_latent=gpt_cond_latent,
            speaker_embedding=speaker_embedding,
            temperature=temperature,  # Add custom parameters here
            length_penalty=length_penalty,
            repetition_penalty=float(repetition_penalty),
            top_k=top_k,
            top_p=top_p,
            enable_text_splitting=sentence_split
        )

    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        out["wav"] = torch.tensor(out["wav"]).unsqueeze(0)
        out_path = fp.name
        torchaudio.save(out_path, out["wav"], 24000)

    return "Speech generated !", out_path, speaker_audio_file

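# Hedged usage sketch (editor's illustration, not part of the committed file): the two
# helpers above can be driven without the UI; the paths below are hypothetical defaults
# produced by a finished fine-tuning run.
#
#   load_model("finetune_models/ready/model.pth",
#              "finetune_models/ready/config.json",
#              "finetune_models/ready/vocab.json",
#              "finetune_models/ready/speakers_xtts.pth")
#   status, wav_path, _ = run_tts("en", "Hello world.",
#                                 "finetune_models/ready/reference.wav",
#                                 temperature=0.75, length_penalty=1, repetition_penalty=5,
#                                 top_k=50, top_p=0.85, sentence_split=True, use_config=False)
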
def load_params_tts(out_path, version):

    out_path = Path(out_path)

    # base_model_path = Path.cwd() / "models" / version

    # if not base_model_path.exists():
    #     return "Base model not found !","","",""

    ready_model_path = out_path / "ready"

    vocab_path = ready_model_path / "vocab.json"
    config_path = ready_model_path / "config.json"
    speaker_path = ready_model_path / "speakers_xtts.pth"
    reference_path = ready_model_path / "reference.wav"

    model_path = ready_model_path / "model.pth"

    if not model_path.exists():
        model_path = ready_model_path / "unoptimize_model.pth"
        if not model_path.exists():
            # One value per Gradio output (message plus five paths)
            return "Params for TTS not found", "", "", "", "", ""

    return "Params for TTS loaded", model_path, config_path, vocab_path, speaker_path, reference_path

if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        description="""XTTS fine-tuning demo\n\n"""
        """
        Example runs:
        python3 TTS/demos/xtts_ft_demo/xtts_demo.py --port
        """,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--share",
        action="store_true",
        default=False,
        help="Enable sharing of the Gradio interface via public link.",
    )
    parser.add_argument(
        "--port",
        type=int,
        help="Port to run the gradio demo. Default: 5003",
        default=5003,
    )
    parser.add_argument(
        "--out_path",
        type=str,
        help="Output path (where data and checkpoints will be saved). Default: finetune_models/",
        default=str(Path.cwd() / "finetune_models"),
    )

    parser.add_argument(
        "--num_epochs",
        type=int,
        help="Number of epochs to train. Default: 6",
        default=6,
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        help="Batch size. Default: 2",
        default=2,
    )
    parser.add_argument(
        "--grad_acumm",
        type=int,
        help="Grad accumulation steps. Default: 1",
        default=1,
    )
    parser.add_argument(
        "--max_audio_length",
        type=int,
        help="Max permitted audio size in seconds. Default: 11",
        default=11,
    )

    args = parser.parse_args()

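    # Hedged usage examples (editor's illustration; flag values are arbitrary):
    #   python xtts_demo.py --port 5003
    #   python xtts_demo.py --share --num_epochs 10 --batch_size 4 --grad_acumm 2
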
    with gr.Blocks() as demo:
        with gr.Tab("1 - Data processing"):
            out_path = gr.Textbox(
                label="Output path (where data and checkpoints will be saved):",
                value=args.out_path,
            )
            # upload_file = gr.Audio(
            #     sources="upload",
            #     label="Select here the audio files that you want to use for XTTS training!",
            #     type="filepath",
            # )
            upload_file = gr.File(
                file_count="multiple",
                label="Select here the audio files that you want to use for XTTS training (Supported formats: wav, mp3, and flac)",
            )

            audio_folder_path = gr.Textbox(
                label="Path to the folder with audio files (optional):",
                value="",
            )

            whisper_model = gr.Dropdown(
                label="Whisper Model",
                value="large-v3",
                choices=[
                    "large-v3", "large-v2", "large", "medium", "small"
                ],
            )

            lang = gr.Dropdown(
                label="Dataset Language",
                value="en",
                choices=[
                    "en", "es", "fr", "de", "it", "pt", "pl", "tr",
                    "ru", "nl", "cs", "ar", "zh", "hu", "ko", "ja"
                ],
            )
            progress_data = gr.Label(
                label="Progress:"
            )
            # demo.load(read_logs, None, logs, every=1)

            prompt_compute_btn = gr.Button(value="Step 1 - Create dataset")

            def preprocess_dataset(audio_path, audio_folder_path, language, whisper_model, out_path, train_csv, eval_csv, progress=gr.Progress(track_tqdm=True)):
                clear_gpu_cache()

                train_csv = ""
                eval_csv = ""

                out_path = os.path.join(out_path, "dataset")
                os.makedirs(out_path, exist_ok=True)

                if audio_folder_path:
                    audio_files = list(list_audios(audio_folder_path))
                else:
                    audio_files = audio_path

                if not audio_files:
                    return "No audio files found! Please provide files via Gradio or specify a folder path.", "", ""
                else:
                    try:
                        # Loading Whisper
                        device = "cuda" if torch.cuda.is_available() else "cpu"

                        # Detect compute type
                        if torch.cuda.is_available():
                            compute_type = "float16"
                        else:
                            compute_type = "float32"

                        asr_model = WhisperModel(whisper_model, device=device, compute_type=compute_type)
                        train_meta, eval_meta, audio_total_size = format_audio_list(audio_files, asr_model=asr_model, target_language=language, out_path=out_path, gradio_progress=progress)
                    except Exception:
                        traceback.print_exc()
                        error = traceback.format_exc()
                        return f"The data processing was interrupted due to an error! Please check the console to verify the full error message! \n Error summary: {error}", "", ""

                # clear_gpu_cache()

                # if audio total len is less than 2 minutes raise an error
                if audio_total_size < 120:
                    message = "The sum of the duration of the audios that you provided should be at least 2 minutes!"
                    print(message)
                    return message, "", ""

                print("Dataset Processed!")
                return "Dataset Processed!", train_meta, eval_meta

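        # Hedged standalone sketch (editor's illustration) of the faster-whisper call that
        # preprocess_dataset makes above; "sample.wav" is a hypothetical input path.
        #
        #   from faster_whisper import WhisperModel
        #   asr = WhisperModel("large-v3", device="cpu", compute_type="float32")
        #   segments, info = asr.transcribe("sample.wav", language="en")
        #   for seg in segments:
        #       print(seg.start, seg.end, seg.text)
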
        with gr.Tab("2 - Fine-tuning XTTS Encoder"):
            load_params_btn = gr.Button(value="Load Params from output folder")
            version = gr.Dropdown(
                label="XTTS base version",
                value="v2.0.2",
                choices=[
                    "v2.0.3", "v2.0.2", "v2.0.1", "v2.0.0", "main"
                ],
            )
            train_csv = gr.Textbox(
                label="Train CSV:",
            )
            eval_csv = gr.Textbox(
                label="Eval CSV:",
            )
            custom_model = gr.Textbox(
                label="(Optional) Custom model.pth file, leave blank if you want to use the base file.",
                value="",
            )
            num_epochs = gr.Slider(
                label="Number of epochs:",
                minimum=1,
                maximum=100,
                step=1,
                value=args.num_epochs,
            )
            batch_size = gr.Slider(
                label="Batch size:",
                minimum=2,
                maximum=512,
                step=1,
                value=args.batch_size,
            )
            grad_acumm = gr.Slider(
                label="Grad accumulation steps:",
                minimum=2,
                maximum=128,
                step=1,
                value=args.grad_acumm,
            )
            max_audio_length = gr.Slider(
                label="Max permitted audio size in seconds:",
                minimum=2,
                maximum=20,
                step=1,
                value=args.max_audio_length,
            )
            clear_train_data = gr.Dropdown(
                label="Clear train data: the selected folder will be deleted after optimizing",
                value="none",
                choices=[
                    "none", "run", "dataset", "all"
                ])

            progress_train = gr.Label(
                label="Progress:"
            )

            # demo.load(read_logs, None, logs_tts_train, every=1)
            train_btn = gr.Button(value="Step 2 - Run the training")
            optimize_model_btn = gr.Button(value="Step 2.5 - Optimize the model")

            def train_model(custom_model, version, language, train_csv, eval_csv, num_epochs, batch_size, grad_acumm, output_path, max_audio_length):
                clear_gpu_cache()

                run_dir = Path(output_path) / "run"

                # Remove train dir
                if run_dir.exists():
                    shutil.rmtree(run_dir)

                # Check if the dataset language matches the language you specified
                lang_file_path = Path(output_path) / "dataset" / "lang.txt"

                # Check if lang.txt already exists and contains a different language
                current_language = None
                if lang_file_path.exists():
                    with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
                        current_language = existing_lang_file.read().strip()
                    if current_language != language:
                        print("The language that was prepared for the dataset does not match the specified language. Changing the language to the one specified in the dataset.")
                        language = current_language

                if not train_csv or not eval_csv:
                    return "You need to run the data processing step or manually set `Train CSV` and `Eval CSV` fields !", "", "", "", "", ""
                try:
                    # convert seconds to waveform frames
                    max_audio_length = int(max_audio_length * 22050)
                    speaker_xtts_path, config_path, original_xtts_checkpoint, vocab_file, exp_path, speaker_wav = train_gpt(custom_model, version, language, num_epochs, batch_size, grad_acumm, train_csv, eval_csv, output_path=output_path, max_audio_length=max_audio_length)
                except Exception:
                    traceback.print_exc()
                    error = traceback.format_exc()
                    return f"The training was interrupted due to an error! Please check the console to check the full error message! \n Error summary: {error}", "", "", "", "", ""

                ready_dir = Path(output_path) / "ready"

                ft_xtts_checkpoint = os.path.join(exp_path, "best_model.pth")

                shutil.copy(ft_xtts_checkpoint, ready_dir / "unoptimize_model.pth")

                ft_xtts_checkpoint = os.path.join(ready_dir, "unoptimize_model.pth")

                # Move reference audio to output folder and rename it
                speaker_reference_path = Path(speaker_wav)
                speaker_reference_new_path = ready_dir / "reference.wav"
                shutil.copy(speaker_reference_path, speaker_reference_new_path)

                print("Model training done!")
                return "Model training done!", config_path, vocab_file, ft_xtts_checkpoint, speaker_xtts_path, speaker_reference_new_path

            def optimize_model(out_path, clear_train_data):
                # print(out_path)
                out_path = Path(out_path)  # Ensure that out_path is a Path object.

                ready_dir = out_path / "ready"
                run_dir = out_path / "run"
                dataset_dir = out_path / "dataset"

                # Clear specified training data directories.
                if clear_train_data in {"run", "all"} and run_dir.exists():
                    try:
                        shutil.rmtree(run_dir)
                    except PermissionError as e:
                        print(f"An error occurred while deleting {run_dir}: {e}")

                if clear_train_data in {"dataset", "all"} and dataset_dir.exists():
                    try:
                        shutil.rmtree(dataset_dir)
                    except PermissionError as e:
                        print(f"An error occurred while deleting {dataset_dir}: {e}")

                # Get full path to model
                model_path = ready_dir / "unoptimize_model.pth"

                if not model_path.is_file():
                    return "Unoptimized model not found in ready folder", ""

                # Load the checkpoint and remove unnecessary parts.
                checkpoint = torch.load(model_path, map_location=torch.device("cpu"))
                del checkpoint["optimizer"]

                # Drop the DVAE weights, which are not needed for inference
                for key in list(checkpoint["model"].keys()):
                    if "dvae" in key:
                        del checkpoint["model"][key]

                # Remove the unoptimized checkpoint now that the slimmed copy is in memory
                os.remove(model_path)

                # Save the optimized model.
                optimized_model_file_name = "model.pth"
                optimized_model = ready_dir / optimized_model_file_name

                torch.save(checkpoint, optimized_model)
                ft_xtts_checkpoint = str(optimized_model)

                clear_gpu_cache()

                return f"Model optimized and saved at {ft_xtts_checkpoint}!", ft_xtts_checkpoint

            def load_params(out_path):
                path_output = Path(out_path)

                dataset_path = path_output / "dataset"

                if not dataset_path.exists():
                    return "The output folder does not exist!", "", "", ""

                eval_train = dataset_path / "metadata_train.csv"
                eval_csv = dataset_path / "metadata_eval.csv"

                # Read the target language back from lang.txt in the dataset directory
                lang_file_path = dataset_path / "lang.txt"

                current_language = None
                if os.path.exists(lang_file_path):
                    with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
                        current_language = existing_lang_file.read().strip()

                clear_gpu_cache()

                print(current_language)
                return "The data has been updated", eval_train, eval_csv, current_language

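        # Hedged sketch (editor's illustration) of the checkpoint slimming that
        # optimize_model performs above, standalone; "some_model.pth" is a hypothetical path.
        #
        #   ckpt = torch.load("some_model.pth", map_location="cpu")
        #   ckpt.pop("optimizer", None)   # training-only state
        #   for k in [k for k in ckpt["model"] if "dvae" in k]:
        #       del ckpt["model"][k]      # DVAE weights are unused at inference time
        #   torch.save(ckpt, "model.pth")
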
        with gr.Tab("3 - Inference"):
            with gr.Row():
                with gr.Column() as col1:
                    load_params_tts_btn = gr.Button(value="Load params for TTS from output folder")
                    xtts_checkpoint = gr.Textbox(
                        label="XTTS checkpoint path:",
                        value="",
                    )
                    xtts_config = gr.Textbox(
                        label="XTTS config path:",
                        value="",
                    )

                    xtts_vocab = gr.Textbox(
                        label="XTTS vocab path:",
                        value="",
                    )
                    xtts_speaker = gr.Textbox(
                        label="XTTS speaker path:",
                        value="",
                    )
                    progress_load = gr.Label(
                        label="Progress:"
                    )
                    load_btn = gr.Button(value="Step 3 - Load Fine-tuned XTTS model")

                with gr.Column() as col2:
                    speaker_reference_audio = gr.Textbox(
                        label="Speaker reference audio:",
                        value="",
                    )
                    tts_language = gr.Dropdown(
                        label="Language",
                        value="en",
                        choices=[
                            "en", "es", "fr", "de", "it", "pt", "pl", "tr",
                            "ru", "nl", "cs", "ar", "zh", "hu", "ko", "ja",
                        ]
                    )
                    tts_text = gr.Textbox(
                        label="Input Text.",
                        value="This model sounds really good and above all, it's reasonably fast.",
                    )
                    with gr.Accordion("Advanced settings", open=False) as acr:
                        temperature = gr.Slider(
                            label="temperature",
                            minimum=0,
                            maximum=1,
                            step=0.05,
                            value=0.75,
                        )
                        length_penalty = gr.Slider(
                            label="length_penalty",
                            minimum=-10.0,
                            maximum=10.0,
                            step=0.5,
                            value=1,
                        )
                        repetition_penalty = gr.Slider(
                            label="repetition penalty",
                            minimum=1,
                            maximum=10,
                            step=0.5,
                            value=5,
                        )
                        top_k = gr.Slider(
                            label="top_k",
                            minimum=1,
                            maximum=100,
                            step=1,
                            value=50,
                        )
                        top_p = gr.Slider(
                            label="top_p",
                            minimum=0,
                            maximum=1,
                            step=0.05,
                            value=0.85,
                        )
                        sentence_split = gr.Checkbox(
                            label="Enable text splitting",
                            value=True,
                        )
                        use_config = gr.Checkbox(
                            label="Use inference settings from config; if disabled, use the settings above",
                            value=False,
                        )
                    tts_btn = gr.Button(value="Step 4 - Inference")

                with gr.Column() as col3:
                    progress_gen = gr.Label(
                        label="Progress:"
                    )
                    tts_output_audio = gr.Audio(label="Generated Audio.")
                    reference_audio = gr.Audio(label="Reference audio used.")

        prompt_compute_btn.click(
            fn=preprocess_dataset,
            inputs=[
                upload_file,
                audio_folder_path,
                lang,
                whisper_model,
                out_path,
                train_csv,
                eval_csv
            ],
            outputs=[
                progress_data,
                train_csv,
                eval_csv,
            ],
        )

        load_params_btn.click(
            fn=load_params,
            inputs=[out_path],
            outputs=[
                progress_train,
                train_csv,
                eval_csv,
                lang
            ]
        )

        train_btn.click(
            fn=train_model,
            inputs=[
                custom_model,
                version,
                lang,
                train_csv,
                eval_csv,
                num_epochs,
                batch_size,
                grad_acumm,
                out_path,
                max_audio_length,
            ],
            outputs=[progress_train, xtts_config, xtts_vocab, xtts_checkpoint, xtts_speaker, speaker_reference_audio],
        )

        optimize_model_btn.click(
            fn=optimize_model,
            inputs=[
                out_path,
                clear_train_data
            ],
            outputs=[progress_train, xtts_checkpoint],
        )

        load_btn.click(
            fn=load_model,
            inputs=[
                xtts_checkpoint,
                xtts_config,
                xtts_vocab,
                xtts_speaker
            ],
            outputs=[progress_load],
        )

        tts_btn.click(
            fn=run_tts,
            inputs=[
                tts_language,
                tts_text,
                speaker_reference_audio,
                temperature,
                length_penalty,
                repetition_penalty,
                top_k,
                top_p,
                sentence_split,
                use_config
            ],
            outputs=[progress_gen, tts_output_audio, reference_audio],
        )

        load_params_tts_btn.click(
            fn=load_params_tts,
            inputs=[
                out_path,
                version
            ],
            outputs=[progress_load, xtts_checkpoint, xtts_config, xtts_vocab, xtts_speaker, speaker_reference_audio],
        )

    demo.launch(
        share=args.share,
        debug=False,
        server_port=args.port,
        # inweb=True,
        # server_name="localhost"
    )
```
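One detail worth noting from `train_model` above: the `max_audio_length` slider is given in seconds and converted to waveform frames at the 22050 Hz rate the code assumes. A quick worked check of the default:

```python
# Worked check of train_model's seconds-to-frames conversion (22050 Hz assumed by the code)
max_audio_seconds = 11
print(int(max_audio_seconds * 22050))  # 242550 frames for the default 11 s cap
```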