rippertnt committed on
Commit
8d763f2
1 Parent(s): 3abb8c1

Upload inference_ms_cpu.py

Browse files
Files changed (1) hide show
  1. inference_ms_cpu.py +80 -0
inference_ms_cpu.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## VCTK
2
+ import torch
3
+
4
+ import commons
5
+ import utils
6
+ from models import SynthesizerTrn
7
+ from text.symbols import symbols
8
+ from text import text_to_sequence
9
+ import time
10
+
11
+ from scipy.io.wavfile import write
12
+
13
+
14
+ def get_text(text, hps):
15
+ text_norm = text_to_sequence(text, hps.data.text_cleaners)
16
+ if hps.data.add_blank:
17
+ text_norm = commons.intersperse(text_norm, 0)
18
+ print(text, text_norm)
19
+ text_norm = torch.LongTensor(text_norm)
20
+ return text_norm
21
+
22
+ LANG = 'ru'
23
+ CONFIG_PATH = f"./configs/{LANG}_base.json"
24
+ MODEL_PATH = f"./logs/{LANG}_base/G_40000.pth"
25
+ #TEXT = "I am artificial intelligent voice made by circulus."
26
+ #TEXT = "저는 서큘러스의 AI Voice 모델입니다. 오늘도 즐거운하루 보내세요."
27
+ TEXT = "привет. Я президент Путин, и мне нравятся советские лидеры Сталин и Ленин."
28
+ #TEXT = "Xin chào. Tôi là Tổng thống Putin và tôi thích các nhà lãnh đạo Liên Xô Stalin và Lenin."
29
+ #TEXT = "สวัสดี. ผมเป็นประธานาธิบดีปูติน และผมชอบผู้นำโซเวียตอย่างสตาลินและเลนิน"
30
+ #TEXT = "Halo. Saya Presiden Putin, dan saya menyukai pemimpin Soviet Stalin dan Lenin."
31
+
32
+ hps = utils.get_hparams_from_file(CONFIG_PATH)
33
+
34
+ if (
35
+ "use_mel_posterior_encoder" in hps.model.keys()
36
+ and hps.model.use_mel_posterior_encoder == True
37
+ ):
38
+ print("Using mel posterior encoder for VITS2")
39
+ posterior_channels = 80 # vits2
40
+ hps.data.use_mel_posterior_encoder = True
41
+ else:
42
+ print("Using lin posterior encoder for VITS1")
43
+ posterior_channels = hps.data.filter_length // 2 + 1
44
+ hps.data.use_mel_posterior_encoder = False
45
+
46
+ net_g = SynthesizerTrn(
47
+ len(symbols),
48
+ posterior_channels,
49
+ hps.train.segment_size // hps.data.hop_length,
50
+ n_speakers=hps.data.n_speakers,
51
+ **hps.model
52
+ )
53
+ _ = net_g.eval()
54
+
55
+ _ = utils.load_checkpoint(MODEL_PATH, net_g, None)
56
+
57
+ stn_tst = get_text(TEXT, hps)
58
+
59
+ with torch.no_grad():
60
+
61
+ for i in range(0,hps.data.n_speakers):
62
+ start = time.time()
63
+ x_tst = stn_tst.unsqueeze(0)
64
+ x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
65
+ sid = torch.LongTensor([i])
66
+ audio = (
67
+ net_g.infer(
68
+ x_tst,
69
+ x_tst_lengths,
70
+ sid=sid,
71
+ noise_scale=0.667,
72
+ noise_scale_w=0.8,
73
+ length_scale=1,
74
+ )[0][0, 0]
75
+ .data
76
+ .float()
77
+ .numpy()
78
+ )
79
+ print(i, time.time() - start)
80
+ write(data=audio, rate=hps.data.sampling_rate, filename=f"test_{LANG}_{i}.wav")