hhim8826 committed on
Commit
a08d927
1 Parent(s): 51aee94

commit from

Browse files
Files changed (3) hide show
  1. app.py +31 -22
  2. logs/ATR/G_74000.pth +0 -3
  3. requirements.txt +3 -1
app.py CHANGED
@@ -43,16 +43,23 @@ from scipy.io.wavfile import write
43
  import gradio as gr
44
  import scipy.io.wavfile
45
  import numpy as np
46
- import torchtext
47
 
48
- def get_text(text, hps):
49
- text_norm = cleaned_text_to_sequence(text)
50
- if hps.data.add_blank:
51
- text_norm = commons.intersperse(text_norm, 0)
52
- text_norm = torch.LongTensor(text_norm)
53
- return text_norm
 
 
 
 
 
 
54
 
55
  hps = utils.get_hparams_from_file("./configs/ATR.json")
 
56
  net_g = SynthesizerTrn(
57
  len(symbols),
58
  hps.data.filter_length // 2 + 1,
@@ -60,10 +67,21 @@ net_g = SynthesizerTrn(
60
  **hps.model)
61
  _ = net_g.eval()
62
 
63
- _ = utils.load_checkpoint("./logs/ATR/G_74000.pth", net_g, None)
 
 
 
 
 
 
 
 
64
 
65
  def jtts(text):
66
- stn_tst = get_text(japanese_phrase_cleaners(text), hps)
 
 
 
67
  with torch.no_grad():
68
  x_tst = stn_tst.unsqueeze(0)
69
  x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
@@ -71,16 +89,7 @@ def jtts(text):
71
  scipy.io.wavfile.write("out.wav", hps.data.sampling_rate, audio)
72
  return "./out.wav"
73
 
74
- inputs = gr.inputs.Textbox(lines=5, label="Input Text")
75
- outputs = gr.outputs.Audio(label="Output Audio")
76
-
77
-
78
- title = "VITS"
79
- description = "demo for VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
80
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.06103'>Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech</a> | <a href='https://github.com/jaywalnut310/vits'>Github Repo</a></p>"
81
-
82
- examples = [
83
- ["吾輩は猫である。名前はまだない"],
84
- ["試験勉強頑張ってくださいね"]]
85
-
86
- gr.Interface(jtts, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
 
43
  import gradio as gr
44
  import scipy.io.wavfile
45
  import numpy as np
46
+ import re
47
 
48
+ jp_match = re.compile(r'.*?[ぁ|あ|ぃ|い|ぅ|う|ぇ|え|ぉ|お|か|が|き|ぎ|く|ぐ|け|げ|こ|ご|さ|ざ|し|じ|す|ず|せ|ぜ|そ|ぞ|た|だ|ち|ぢ|っ|つ|づ|て|で|と|ど|な|に|ぬ|ね|の|は|ば|ぱ|ひ|び|ぴ|ふ|ぶ|ぷ|へ|べ|ぺ|ほ|ぼ|ぽ|ま|み|む|め|も|ゃ|や|ゅ|ゆ|ょ|よ|ら|り|る|れ|ろ|ゎ|わ|ゐ|ゑ|を|ん|ゔ|ゕ|ゖ|ゝ|ゞ|ゟ|゠|ァ|ア|ィ|イ|ゥ|ウ|ェ|エ|ォ|オ|カ|ガ|キ|ギ|ク|グ|ケ|ゲ|コ|ゴ|サ|ザ|シ|ジ|ス|ズ|セ|ゼ|ソ|ゾ|タ|ダ|チ|ヂ|ッ|ツ|ヅ|テ|デ|ト|ド|ナ|ニ|ヌ|ネ|ノ|ハ|バ|パ|ヒ|ビ|ピ|フ|ブ|プ|ヘ|ベ|ペ|ホ|ボ|ポ|マ|ミ|ム|メ|モ|ャ|ヤ|ュ|ユ|ョ|ヨ|ラ|リ|ル|レ|ロ|ヮ|ワ|ヰ|ヱ|ヲ|ン|ヴ|ヵ|ヶ|ヷ|ヸ|ヹ|ヺ]+')
49
+
50
+ title = "VITS"
51
+ description = "demo for VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
52
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.06103'>Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech</a> | <a href='https://github.com/jaywalnut310/vits'>Github Repo</a></p>"
53
+
54
+ examples = [
55
+ ["原因不明の海面上昇によって、地表の多くが海に沈んだ近未来。"],
56
+ ["幼い頃の事故によって片足を失った少年・斑鳩夏生は、都市での暮らしに見切りを付け、海辺の田舎町へと移り住んだ。"],
57
+ ["身よりのない彼に遺されたのは、海洋地質学者だった祖母の船と潜水艇、そして借金。"],
58
+ ["nanika acltara itsudemo hanashIte kudasai. gakuiNno kotojanaku, shijini kaNsuru kotodemo nanidemo."]
59
+ ]
60
 
61
  hps = utils.get_hparams_from_file("./configs/ATR.json")
62
+
63
  net_g = SynthesizerTrn(
64
  len(symbols),
65
  hps.data.filter_length // 2 + 1,
 
67
  **hps.model)
68
  _ = net_g.eval()
69
 
70
+ _ = utils.load_checkpoint("./G_85000.pth", net_g, None)
71
+
72
+
73
+ def get_text(text, hps):
74
+ text_norm = cleaned_text_to_sequence(text)
75
+ if hps.data.add_blank:
76
+ text_norm = commons.intersperse(text_norm, 0)
77
+ text_norm = torch.LongTensor(text_norm)
78
+ return text_norm
79
 
80
  def jtts(text):
81
+ if jp_match.match(text):
82
+ stn_tst = get_text(japanese_phrase_cleaners(text), hps)
83
+ else:
84
+ stn_tst = get_text(text, hps)
85
  with torch.no_grad():
86
  x_tst = stn_tst.unsqueeze(0)
87
  x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
 
89
  scipy.io.wavfile.write("out.wav", hps.data.sampling_rate, audio)
90
  return "./out.wav"
91
 
92
+ if __name__ == '__main__':
93
+ inputs = gr.inputs.Textbox(lines=5, label="Input Text")
94
+ outputs = gr.outputs.Audio(label="Output Audio")
95
+ gr.Interface(jtts, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
 
 
 
 
 
 
 
 
 
logs/ATR/G_74000.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:03465e5b6a4de27bf15e59b714463f8cf5e188ac018f9225e5380b22645e1c2d
3
- size 436558785
 
 
 
 
requirements.txt CHANGED
@@ -8,4 +8,6 @@ tensorboard==2.3.0
8
  torch==1.6.0
9
  torchvision==0.7.0
10
  Unidecode==1.1.1
11
- torchtext
 
 
 
8
  torch==1.6.0
9
  torchvision==0.7.0
10
  Unidecode==1.1.1
11
+ pyopenjtalk
12
+ janome
13
+ regex