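# Gradio demo for VITS Japanese text-to-speech: builds the monotonic_align
# extension, loads a pretrained checkpoint, and serves a simple web UI.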
import os
import sys
from subprocess import call

import torch

import commons
import utils

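# Run a shell command, echoing it first; Ctrl-C aborts the whole script.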
def run_cmd(command):
    try:
        print(command)
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)

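# Build the monotonic_align Cython extension in place, then return to the
# repo root. espeak is installed for the phonemizer backend used by some
# of the text cleaners.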
current = os.getcwd()
print(current)
full = os.path.join(current, "monotonic_align")
print(full)
os.chdir(full)
print(os.getcwd())
run_cmd("python3 setup.py build_ext --inplace")
run_cmd("apt-get install espeak -y")
os.chdir(current)
print(os.getcwd())

from models import SynthesizerTrn
from text.symbols import symbols
from text.cleaners import japanese_phrase_cleaners
from text import cleaned_text_to_sequence

import re

import gradio as gr
import scipy.io.wavfile

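# Any hiragana or katakana character (U+3041-U+3096, U+309D-U+30FA);
# used to decide whether the Japanese cleaner should run on the input.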
jp_match = re.compile(r'[ぁ-ゖゝ-ヺ]')

title = "VITS"
description = "Demo for VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech. Enter your own text, or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.06103'>Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech</a> | <a href='https://github.com/jaywalnut310/vits'>Github Repo</a></p>"

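# Example inputs: three Japanese sentences plus one already-cleaned phoneme
# string (non-kana input skips the Japanese cleaner below).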
examples = [
["原因不明の海面上昇によって、地表の多くが海に沈んだ近未来。"],
["幼い頃の事故によって片足を失った少年・斑鳩夏生は、都市での暮らしに見切りを付け、海辺の田舎町へと移り住んだ。"],
["身よりのない彼に遺されたのは、海洋地質学者だった祖母の船と潜水艇、そして借金。"],
["nanika acltara itsudemo hanashIte kudasai. gakuiNno kotojanaku, shijini kaNsuru kotodemo nanidemo."]
]

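# Hyperparameters for this particular checkpoint/config pair.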
hps = utils.get_hparams_from_file("./configs/ATR.json")

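# Build the synthesizer and switch it to eval mode for inference.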
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model)
_ = net_g.eval()

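# Load generator weights only; no optimizer is needed at inference time.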
_ = utils.load_checkpoint("./G_172000.pth", net_g, None)


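# Convert cleaned text to a LongTensor of symbol ids, optionally interspersing
# blank tokens between symbols (matches the add_blank training setting).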
def get_text(text, hps):
    text_norm = cleaned_text_to_sequence(text)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm

def jtts(text):
    # Kana input is normalized by the Japanese cleaner; anything else is
    # assumed to be already-cleaned phoneme text.
    if jp_match.search(text):
        stn_tst = get_text(japanese_phrase_cleaners(text), hps)
    else:
        stn_tst = get_text(text, hps)
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667,
                            noise_scale_w=0.8, length_scale=1)[0][0, 0].data.float().numpy()
        scipy.io.wavfile.write("out.wav", hps.data.sampling_rate, audio)
    return "./out.wav"
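# Example (hypothetical input; assumes the checkpoint and config above exist):
#   jtts("こんにちは")  # writes out.wav and returns its path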

if __name__ == '__main__':
    # Legacy (pre-3.x) Gradio input/output components.
    inputs = gr.inputs.Textbox(lines=5, label="Input Text")
    outputs = gr.outputs.Audio(label="Output Audio")
    gr.Interface(jtts, inputs, outputs, title=title, description=description,
                 article=article, examples=examples).launch()