candlend committed on
Commit
3d5f773
1 Parent(s): a299114
Files changed (46) hide show
  1. app.py +2 -3
  2. monotonic_align/core.cpython-38-x86_64-linux-gnu.so +0 -0
  3. text/__pycache__/__init__.cpython-38.pyc +0 -0
  4. text/__pycache__/cantonese.cpython-38.pyc +0 -0
  5. text/__pycache__/cleaners.cpython-38.pyc +0 -0
  6. text/__pycache__/english.cpython-38.pyc +0 -0
  7. text/__pycache__/japanese.cpython-38.pyc +0 -0
  8. text/__pycache__/korean.cpython-38.pyc +0 -0
  9. text/__pycache__/mandarin.cpython-38.pyc +0 -0
  10. text/__pycache__/ngu_dialect.cpython-38.pyc +0 -0
  11. text/__pycache__/sanskrit.cpython-38.pyc +0 -0
  12. text/__pycache__/shanghainese.cpython-38.pyc +0 -0
  13. text/__pycache__/symbols.cpython-38.pyc +0 -0
  14. text/__pycache__/thai.cpython-38.pyc +0 -0
  15. tts_inferencer.py +0 -108
  16. vits/__init__.py +5 -0
  17. attentions.py → vits/attentions.py +0 -0
  18. commons.py → vits/commons.py +0 -0
  19. {configs → vits/configs}/biaobei_base.json +0 -0
  20. {configs → vits/configs}/chinese_base.json +0 -0
  21. {configs → vits/configs}/cjke_base.json +0 -0
  22. {configs → vits/configs}/hoshimi_base.json +0 -0
  23. data_utils.py → vits/data_utils.py +0 -0
  24. mel_processing.py → vits/mel_processing.py +0 -0
  25. models.py → vits/models.py +0 -1
  26. {models → vits/models}/formal/G_200000.pth +0 -0
  27. {models → vits/models}/formal/G_250000.pth +0 -0
  28. {models → vits/models}/normal/G_250000.pth +0 -0
  29. {models → vits/models}/normal/G_300000.pth +0 -0
  30. {models → vits/models}/normal/G_350000.pth +0 -0
  31. modules.py → vits/modules.py +0 -0
  32. {text → vits/text}/LICENSE +0 -0
  33. {text → vits/text}/__init__.py +0 -0
  34. {text → vits/text}/cantonese.py +0 -0
  35. {text → vits/text}/cleaners.py +0 -0
  36. {text → vits/text}/english.py +0 -0
  37. {text → vits/text}/japanese.py +0 -0
  38. {text → vits/text}/korean.py +0 -0
  39. {text → vits/text}/mandarin.py +0 -0
  40. {text → vits/text}/ngu_dialect.py +0 -0
  41. {text → vits/text}/sanskrit.py +0 -0
  42. {text → vits/text}/shanghainese.py +0 -0
  43. {text → vits/text}/symbols.py +0 -0
  44. {text → vits/text}/thai.py +0 -0
  45. transforms.py → vits/transforms.py +0 -0
  46. utils.py → vits/utils.py +0 -0
app.py CHANGED
@@ -1,7 +1,5 @@
1
  import gradio as gr
2
- from tts_inferencer import TTSInferencer
3
-
4
- tts_inferencer = TTSInferencer("./configs/hoshimi_base.json")
5
 
6
  app = gr.Blocks()
7
  with app:
@@ -9,5 +7,6 @@ with app:
9
  gr.HTML(f.read())
10
  with gr.Tabs():
11
  with gr.TabItem("语音合成"):
 
12
  tts_inferencer.render()
13
  app.launch()
 
1
  import gradio as gr
2
+ from vits.tts_inferencer import TTSInferencer
 
 
3
 
4
  app = gr.Blocks()
5
  with app:
 
7
  gr.HTML(f.read())
8
  with gr.Tabs():
9
  with gr.TabItem("语音合成"):
10
+ tts_inferencer = TTSInferencer("vits/configs/hoshimi_base.json")
11
  tts_inferencer.render()
12
  app.launch()
monotonic_align/core.cpython-38-x86_64-linux-gnu.so DELETED
Binary file (937 kB)
 
text/__pycache__/__init__.cpython-38.pyc DELETED
Binary file (2.13 kB)
 
text/__pycache__/cantonese.cpython-38.pyc DELETED
Binary file (1.94 kB)
 
text/__pycache__/cleaners.cpython-38.pyc DELETED
Binary file (6.62 kB)
 
text/__pycache__/english.cpython-38.pyc DELETED
Binary file (4.85 kB)
 
text/__pycache__/japanese.cpython-38.pyc DELETED
Binary file (4.44 kB)
 
text/__pycache__/korean.cpython-38.pyc DELETED
Binary file (5.71 kB)
 
text/__pycache__/mandarin.cpython-38.pyc DELETED
Binary file (6.41 kB)
 
text/__pycache__/ngu_dialect.cpython-38.pyc DELETED
Binary file (1.03 kB)
 
text/__pycache__/sanskrit.cpython-38.pyc DELETED
Binary file (1.68 kB)
 
text/__pycache__/shanghainese.cpython-38.pyc DELETED
Binary file (1.78 kB)
 
text/__pycache__/symbols.cpython-38.pyc DELETED
Binary file (476 Bytes)
 
text/__pycache__/thai.cpython-38.pyc DELETED
Binary file (1.44 kB)
 
tts_inferencer.py DELETED
@@ -1,108 +0,0 @@
1
- import os
2
- import json
3
- import math
4
- import torch
5
- from torch import nn
6
- from torch.nn import functional as F
7
- from torch.utils.data import DataLoader
8
-
9
- import commons
10
- import utils
11
- from models import SynthesizerTrn
12
- from text.symbols import symbols
13
- from text import text_to_sequence
14
- import gradio as gr
15
-
16
- mode_dict = {
17
- "普通声线": "normal",
18
- "营业声线": "formal"
19
- }
20
-
21
- default_mode = "普通声线"
22
- default_noise_scale = 0.667
23
- default_noise_scale_w = 0.8
24
- default_length_scale = 1
25
-
26
- def get_text(text, hps):
27
- text_norm = text_to_sequence(text, hps.data.text_cleaners)
28
- if hps.data.add_blank:
29
- text_norm = commons.intersperse(text_norm, 0)
30
- text_norm = torch.LongTensor(text_norm)
31
- return text_norm
32
-
33
- class TTSInferencer:
34
- def __init__(self, hps_path, device="cpu"):
35
- self.device = torch.device(device)
36
- self.hps = utils.get_hparams_from_file(hps_path)
37
- self.select_mode(default_mode)
38
- self.load_model(self.latest_model_path)
39
-
40
- def select_mode(self, mode):
41
- self.mode = mode
42
- self.model_dir_path = os.path.join("models", mode_dict[mode])
43
- self.models = []
44
- for f in os.listdir(self.model_dir_path):
45
- if (f.startswith("D_")):
46
- continue
47
- if (f.endswith(".pth")):
48
- self.models.append(f)
49
- self.latest_model_path = utils.latest_checkpoint_path(self.model_dir_path, "G_*.pth")
50
-
51
- def infer(self, text, noise_scale=.667, noise_scale_w=0.8, length_scale=1):
52
- stn_tst = get_text(text, self.hps)
53
- with torch.no_grad():
54
- x_tst = stn_tst.unsqueeze(0).to(self.device)
55
- x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(self.device)
56
- audio = self.net_g.infer(x_tst, x_tst_lengths, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.float().numpy()
57
- return (self.hps.data.sampling_rate, audio)
58
-
59
- def change_mode(self, mode):
60
- self.select_mode(mode)
61
- return gr.update(choices=self.models, value=os.path.basename(self.latest_model_path))
62
-
63
- def change_model(self, model_file_name):
64
- self.load_model(os.path.join(self.model_dir_path, model_file_name))
65
- return f"载入模型:{model_file_name}({self.mode})"
66
-
67
- def render(self):
68
- choice_mode = gr.Radio(choices=["普通声线", "营业声线"], label="声线选择", value=default_mode)
69
- choice_model = gr.Dropdown(choices=self.models, label=f"模型迭代版本选择", value=os.path.basename(self.pth_path))
70
- # with gr.Row():
71
- # advanced = gr.Checkbox(label="显示高级设置(效果不可控)")
72
- # default = gr.Button("恢复默认设置").style(full_width=False)
73
- noise_scale = gr.Slider(minimum=0, maximum=3, value=default_noise_scale, step=0.001, label="noise_scale(效果不可控,谨慎修改)")
74
- noise_scale_w = gr.Slider(minimum=0, maximum=3, value=default_noise_scale_w, step=0.001, label="noise_scale_w(效果不可控,谨慎修改)")
75
- length_scale = gr.Slider(minimum=0, maximum=3, value=default_length_scale, step=0.001, label="length_scale(数值越大输出音频越长)")
76
-
77
- tts_input = gr.TextArea(
78
- label="请输入文本(目前只支持汉字和单个英文字母,可以使用常用符号和空格来改变语调和停顿,请勿一次性输入过长文本)",
79
- value="这里是爱喝奶茶,穿得也像奶茶魅力点是普通话二乙的星弥吼西咪,晚上齁。")
80
- tts_submit = gr.Button("合成", variant="primary")
81
- tts_output = gr.Audio(label="Output")
82
- tts_model = gr.Markdown(f"载入模型:{os.path.basename(self.latest_model_path)}({self.mode})")
83
- gr.HTML('''
84
- <div style="text-align:right;font-size:12px;color:#4D4D4D">
85
- <div class="font-medium">版权声明</div>
86
- <div>本项目数据集和模型版权属于星弥Hoshimi</div>
87
- <div>仅供学习交流,不可用于任何商业和非法用途,否则后果自负</div>
88
- </div>
89
- ''')
90
- # advanced.change(fn=lambda visible: gr.update(visible=visible), inputs=advanced, outputs=noise_scale)
91
- # advanced.change(fn=lambda visible: gr.update(visible=visible), inputs=advanced, outputs=noise_scale_w)
92
- # default.click(fn=lambda visible: gr.update(value=default_noise_scale), inputs=advanced, outputs=noise_scale)
93
- # default.click(fn=lambda visible: gr.update(value=default_noise_scale_w), inputs=advanced, outputs=noise_scale_w)
94
- # default.click(fn=lambda visible: gr.update(value=default_length_scale), inputs=advanced, outputs=length_scale)
95
- choice_mode.change(self.change_mode, inputs=choice_mode, outputs=choice_model)
96
- choice_model.change(self.change_model, inputs=[choice_model], outputs=[tts_model])
97
- tts_submit.click(self.infer, [tts_input, noise_scale, noise_scale_w, length_scale], [tts_output], api_name=f"infer")
98
-
99
-
100
- def load_model(self, model_path):
101
- self.pth_path = model_path
102
- self.net_g = SynthesizerTrn(
103
- len(symbols),
104
- self.hps.data.filter_length // 2 + 1,
105
- self.hps.train.segment_size // self.hps.data.hop_length,
106
- **self.hps.model).to(self.device)
107
- _ = self.net_g.eval()
108
- _ = utils.load_checkpoint(self.pth_path, self.net_g, None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
vits/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+
4
+ ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
5
+ sys.path.append(ROOT_PATH)
attentions.py → vits/attentions.py RENAMED
File without changes
commons.py → vits/commons.py RENAMED
File without changes
{configs → vits/configs}/biaobei_base.json RENAMED
File without changes
{configs → vits/configs}/chinese_base.json RENAMED
File without changes
{configs → vits/configs}/cjke_base.json RENAMED
File without changes
{configs → vits/configs}/hoshimi_base.json RENAMED
File without changes
data_utils.py → vits/data_utils.py RENAMED
File without changes
mel_processing.py → vits/mel_processing.py RENAMED
File without changes
models.py → vits/models.py RENAMED
@@ -7,7 +7,6 @@ from torch.nn import functional as F
7
  import commons
8
  import modules
9
  import attentions
10
- import monotonic_align
11
 
12
  from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
13
  from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
 
7
  import commons
8
  import modules
9
  import attentions
 
10
 
11
  from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
12
  from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
{models → vits/models}/formal/G_200000.pth RENAMED
File without changes
{models → vits/models}/formal/G_250000.pth RENAMED
File without changes
{models → vits/models}/normal/G_250000.pth RENAMED
File without changes
{models → vits/models}/normal/G_300000.pth RENAMED
File without changes
{models → vits/models}/normal/G_350000.pth RENAMED
File without changes
modules.py → vits/modules.py RENAMED
File without changes
{text → vits/text}/LICENSE RENAMED
File without changes
{text → vits/text}/__init__.py RENAMED
File without changes
{text → vits/text}/cantonese.py RENAMED
File without changes
{text → vits/text}/cleaners.py RENAMED
File without changes
{text → vits/text}/english.py RENAMED
File without changes
{text → vits/text}/japanese.py RENAMED
File without changes
{text → vits/text}/korean.py RENAMED
File without changes
{text → vits/text}/mandarin.py RENAMED
File without changes
{text → vits/text}/ngu_dialect.py RENAMED
File without changes
{text → vits/text}/sanskrit.py RENAMED
File without changes
{text → vits/text}/shanghainese.py RENAMED
File without changes
{text → vits/text}/symbols.py RENAMED
File without changes
{text → vits/text}/thai.py RENAMED
File without changes
transforms.py → vits/transforms.py RENAMED
File without changes
utils.py → vits/utils.py RENAMED
File without changes