parquet-converter committed
Commit fbd9f6a
1 Parent(s): 1c4becd

Update parquet files

LICENSE DELETED
@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) 2021 Jingyi Li
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
 
README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Sovits4.0 V2
- emoji: 📚
- colorFrom: blue
- colorTo: purple
- sdk: gradio
- sdk_version: 3.19.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
app.py DELETED
@@ -1,76 +0,0 @@
- import io
- import os
-
- os.system("wget -P hubert/ https://huggingface.co/spaces/innnky/nanami/resolve/main/checkpoint_best_legacy_500.pt")
- import gradio as gr
- import librosa
- import numpy as np
- import soundfile
- from inference.infer_tool import Svc
- import logging
-
- logging.getLogger('numba').setLevel(logging.WARNING)
- logging.getLogger('markdown_it').setLevel(logging.WARNING)
- logging.getLogger('urllib3').setLevel(logging.WARNING)
- logging.getLogger('matplotlib').setLevel(logging.WARNING)
-
- model = Svc("logs/44k/G_0.pth", "configs/config.json", cluster_model_path="logs/44k/kmeans_10000.pt")
-
-
-
- def vc_fn(sid, input_audio, vc_transform, auto_f0,cluster_ratio, noise_scale):
-     if input_audio is None:
-         return "You need to upload an audio", None
-     sampling_rate, audio = input_audio
-     # print(audio.shape,sampling_rate)
-     duration = audio.shape[0] / sampling_rate
-     if duration > 45:
-         return "请上传小于45s的音频,需要转换长音频请本地进行转换", None
-     audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-     if len(audio.shape) > 1:
-         audio = librosa.to_mono(audio.transpose(1, 0))
-     if sampling_rate != 16000:
-         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-     print(audio.shape)
-     out_wav_path = "temp.wav"
-     soundfile.write(out_wav_path, audio, 16000, format="wav")
-     print( cluster_ratio, auto_f0, noise_scale)
-     out_audio, out_sr = model.infer(sid, vc_transform, out_wav_path,
-                                     cluster_infer_ratio=cluster_ratio,
-                                     auto_predict_f0=auto_f0,
-                                     noice_scale=noise_scale
-                                     )
-     audio = out_audio.numpy()
-     rms = librosa.feature.rms(audio, frame_length=2048, hop_length=512)[0]
-     target_rms = 0.1
-     current_rms = np.mean(rms)
-     gain = target_rms / current_rms
-     audio *= gain
-     return "Success", (44100, audio)
-
-
- app = gr.Blocks()
- with app:
-     with gr.Tabs():
-         with gr.TabItem("Basic"):
-             gr.Markdown(value="""
-                 sovits4.0 在线demo
-
-                 此demo为预训练底模在线demo,使用数据:云灏 即霜 辉宇·星AI 派蒙 绫地宁宁
-                 """)
-             spks = list(model.spk2id.keys())
-             sid = gr.Dropdown(label="音色", choices=["nen", "yunhao","paimon", "huiyu","jishuang"], value="paimon")
-             vc_input3 = gr.Audio(label="上传音频(长度小于45秒)")
-             vc_transform = gr.Number(label="变调(整数,可以正负,半音数量,升高八度就是12)", value=0)
-             cluster_ratio = gr.Number(label="聚类模型混合比例,0-1之间,默认为0不启用聚类,能提升音色相似度,但会导致咬字下降(如果使用建议0.5左右)", value=0)
-             auto_f0 = gr.Checkbox(label="自动f0预测,配合聚类模型f0预测效果更好,会导致变调功能失效(仅限转换语音,歌声不要勾选此项会究极跑调)", value=False)
-             noise_scale = gr.Number(label="noise_scale 建议不要动,会影响音质,玄学参数", value=0.4)
-             vc_submit = gr.Button("转换", variant="primary")
-             vc_output1 = gr.Textbox(label="Output Message")
-             vc_output2 = gr.Audio(label="Output Audio")
-             vc_submit.click(vc_fn, [sid, vc_input3, vc_transform,auto_f0,cluster_ratio, noise_scale], [vc_output1, vc_output2])
-
-     app.launch()
-
-
-
 
cluster/__init__.py DELETED
@@ -1,29 +0,0 @@
- import numpy as np
- import torch
- from sklearn.cluster import KMeans
-
- def get_cluster_model(ckpt_path):
-     checkpoint = torch.load(ckpt_path)
-     kmeans_dict = {}
-     for spk, ckpt in checkpoint.items():
-         km = KMeans(ckpt["n_features_in_"])
-         km.__dict__["n_features_in_"] = ckpt["n_features_in_"]
-         km.__dict__["_n_threads"] = ckpt["_n_threads"]
-         km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"]
-         kmeans_dict[spk] = km
-     return kmeans_dict
-
- def get_cluster_result(model, x, speaker):
-     """
-     x: np.array [t, 256]
-     return cluster class result
-     """
-     return model[speaker].predict(x)
-
- def get_cluster_center_result(model, x,speaker):
-     """x: np.array [t, 256]"""
-     predict = model[speaker].predict(x)
-     return model[speaker].cluster_centers_[predict]
-
- def get_center(model, x,speaker):
-     return model[speaker].cluster_centers_[x]
 
cluster/train_cluster.py DELETED
@@ -1,89 +0,0 @@
- import os
- from glob import glob
- from pathlib import Path
- import torch
- import logging
- import argparse
- import torch
- import numpy as np
- from sklearn.cluster import KMeans, MiniBatchKMeans
- import tqdm
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
- import time
- import random
-
- def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False):
-
-     logger.info(f"Loading features from {in_dir}")
-     features = []
-     nums = 0
-     for path in tqdm.tqdm(in_dir.glob("*.soft.pt")):
-         features.append(torch.load(path).squeeze(0).numpy().T)
-         # print(features[-1].shape)
-     features = np.concatenate(features, axis=0)
-     print(nums, features.nbytes/ 1024**2, "MB , shape:",features.shape, features.dtype)
-     features = features.astype(np.float32)
-     logger.info(f"Clustering features of shape: {features.shape}")
-     t = time.time()
-     if use_minibatch:
-         kmeans = MiniBatchKMeans(n_clusters=n_clusters,verbose=verbose, batch_size=4096, max_iter=80).fit(features)
-     else:
-         kmeans = KMeans(n_clusters=n_clusters,verbose=verbose).fit(features)
-     print(time.time()-t, "s")
-
-     x = {
-         "n_features_in_": kmeans.n_features_in_,
-         "_n_threads": kmeans._n_threads,
-         "cluster_centers_": kmeans.cluster_centers_,
-     }
-     print("end")
-
-     return x
-
-
- if __name__ == "__main__":
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--dataset', type=Path, default="./dataset/44k",
-                         help='path of training data directory')
-     parser.add_argument('--output', type=Path, default="logs/44k",
-                         help='path of model output directory')
-
-     args = parser.parse_args()
-
-     checkpoint_dir = args.output
-     dataset = args.dataset
-     n_clusters = 10000
-
-     ckpt = {}
-     for spk in os.listdir(dataset):
-         if os.path.isdir(dataset/spk):
-             print(f"train kmeans for {spk}...")
-             in_dir = dataset/spk
-             x = train_cluster(in_dir, n_clusters, verbose=False)
-             ckpt[spk] = x
-
-     checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt"
-     checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
-     torch.save(
-         ckpt,
-         checkpoint_path,
-     )
-
-
-     # import cluster
-     # for spk in tqdm.tqdm(os.listdir("dataset")):
-     #     if os.path.isdir(f"dataset/{spk}"):
-     #         print(f"start kmeans inference for {spk}...")
-     #         for feature_path in tqdm.tqdm(glob(f"dataset/{spk}/*.discrete.npy", recursive=True)):
-     #             mel_path = feature_path.replace(".discrete.npy",".mel.npy")
-     #             mel_spectrogram = np.load(mel_path)
-     #             feature_len = mel_spectrogram.shape[-1]
-     #             c = np.load(feature_path)
-     #             c = utils.tools.repeat_expand_2d(torch.FloatTensor(c), feature_len).numpy()
-     #             feature = c.T
-     #             feature_class = cluster.get_cluster_result(feature, spk)
-     #             np.save(feature_path.replace(".discrete.npy", ".discrete_class.npy"), feature_class)
-
-
 
configs/config.json DELETED
@@ -1,106 +0,0 @@
- {
-   "train": {
-     "log_interval": 50,
-     "eval_interval": 1000,
-     "seed": 1234,
-     "port": 8001,
-     "epochs": 10000,
-     "learning_rate": 0.0002,
-     "betas": [
-       0.8,
-       0.99
-     ],
-     "eps": 1e-09,
-     "batch_size": 6,
-     "accumulation_steps": 1,
-     "fp16_run": false,
-     "lr_decay": 0.998,
-     "segment_size": 10240,
-     "init_lr_ratio": 1,
-     "warmup_epochs": 0,
-     "c_mel": 45,
-     "keep_ckpts":4
-   },
-   "data": {
-     "data_dir": "dataset",
-     "dataset_type": "SingDataset",
-     "collate_type": "SingCollate",
-     "training_filelist": "filelists/train-Copy1.txt",
-     "validation_filelist": "filelists/val-Copy1.txt",
-     "max_wav_value": 32768.0,
-     "sampling_rate": 44100,
-     "n_fft": 2048,
-     "fmin": 0,
-     "fmax": 22050,
-     "hop_length": 512,
-     "win_size": 2048,
-     "acoustic_dim": 80,
-     "c_dim": 256,
-     "min_level_db": -115,
-     "ref_level_db": 20,
-     "min_db": -115,
-     "max_abs_value": 4.0,
-     "n_speakers": 200
-   },
-   "model": {
-     "hidden_channels": 192,
-     "spk_channels": 192,
-     "filter_channels": 768,
-     "n_heads": 2,
-     "n_layers": 4,
-     "kernel_size": 3,
-     "p_dropout": 0.1,
-     "prior_hidden_channels": 192,
-     "prior_filter_channels": 768,
-     "prior_n_heads": 2,
-     "prior_n_layers": 4,
-     "prior_kernel_size": 3,
-     "prior_p_dropout": 0.1,
-     "resblock": "1",
-     "use_spectral_norm": false,
-     "resblock_kernel_sizes": [
-       3,
-       7,
-       11
-     ],
-     "resblock_dilation_sizes": [
-       [
-         1,
-         3,
-         5
-       ],
-       [
-         1,
-         3,
-         5
-       ],
-       [
-         1,
-         3,
-         5
-       ]
-     ],
-     "upsample_rates": [
-       8,
-       8,
-       4,
-       2
-     ],
-     "upsample_initial_channel": 256,
-     "upsample_kernel_sizes": [
-       16,
-       16,
-       8,
-       4
-     ],
-     "n_harmonic": 64,
-     "n_bands": 65
-   },
-   "spk": {
-     "jishuang": 0,
-     "huiyu": 1,
-     "nen": 2,
-     "paimon": 3,
-     "yunhao": 4
-   }
- }
 
data_utils.py DELETED
@@ -1,329 +0,0 @@
1
- import os
2
- import sys
3
- import string
4
- import random
5
- import numpy as np
6
- import math
7
- import json
8
- from torch.utils.data import DataLoader
9
- import torch
10
-
11
- import utils
12
- from modules import audio
13
-
14
- sys.path.append('../..')
15
- from utils import load_wav
16
-
17
-
18
- class BaseDataset(torch.utils.data.Dataset):
19
-
20
- def __init__(self, hparams, fileid_list_path):
21
- self.hparams = hparams
22
- self.fileid_list = self.get_fileid_list(fileid_list_path)
23
- random.seed(hparams.train.seed)
24
- random.shuffle(self.fileid_list)
25
- if (hparams.data.n_speakers > 0):
26
- self.spk2id = hparams.spk
27
-
28
- def get_fileid_list(self, fileid_list_path):
29
- fileid_list = []
30
- with open(fileid_list_path, 'r') as f:
31
- for line in f.readlines():
32
- fileid_list.append(line.strip())
33
-
34
- return fileid_list
35
-
36
- def __len__(self):
37
- return len(self.fileid_list)
38
-
39
-
40
- class SingDataset(BaseDataset):
41
- def __init__(self, hparams, data_dir, fileid_list_path):
42
- BaseDataset.__init__(self, hparams, fileid_list_path)
43
- self.hps = hparams
44
- self.data_dir = data_dir
45
- # self.__filter__()
46
-
47
- def __filter__(self):
48
- new_fileid_list= []
49
- for wav_path in self.fileid_list:
50
- # mel_path = wav_path + ".mel.npy"
51
- # mel = np.load(mel_path)
52
- # if mel.shape[0] < 60:
53
- # print("skip short audio:", wav_path)
54
- # continue
55
- # if mel.shape[0] > 800:
56
- # print("skip long audio:", wav_path)
57
- # continue
58
- # assert mel.shape[1] == 80
59
- new_fileid_list.append(wav_path)
60
- print("original length:", len(self.fileid_list))
61
- print("filtered length:", len(new_fileid_list))
62
- self.fileid_list = new_fileid_list
63
-
64
- def interpolate_f0(self, data):
65
- '''
66
- 对F0进行插值处理
67
- '''
68
- data = np.reshape(data, (data.size, 1))
69
-
70
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
71
- vuv_vector[data > 0.0] = 1.0
72
- vuv_vector[data <= 0.0] = 0.0
73
-
74
- ip_data = data
75
-
76
- frame_number = data.size
77
- last_value = 0.0
78
- for i in range(frame_number):
79
- if data[i] <= 0.0:
80
- j = i + 1
81
- for j in range(i + 1, frame_number):
82
- if data[j] > 0.0:
83
- break
84
- if j < frame_number - 1:
85
- if last_value > 0.0:
86
- step = (data[j] - data[i - 1]) / float(j - i)
87
- for k in range(i, j):
88
- ip_data[k] = data[i - 1] + step * (k - i + 1)
89
- else:
90
- for k in range(i, j):
91
- ip_data[k] = data[j]
92
- else:
93
- for k in range(i, frame_number):
94
- ip_data[k] = last_value
95
- else:
96
- ip_data[i] = data[i]
97
- last_value = data[i]
98
-
99
- return ip_data, vuv_vector
100
-
101
- def parse_label(self, pho, pitchid, dur, slur, gtdur):
102
- phos = []
103
- pitchs = []
104
- durs = []
105
- slurs = []
106
- gtdurs = []
107
-
108
- for index in range(len(pho.split())):
109
- phos.append(npu.symbol_converter.ttsing_phone_to_int[pho.strip().split()[index]])
110
- pitchs.append(0)
111
- durs.append(0)
112
- slurs.append(0)
113
- gtdurs.append(float(gtdur.strip().split()[index]))
114
-
115
- phos = np.asarray(phos, dtype=np.int32)
116
- pitchs = np.asarray(pitchs, dtype=np.int32)
117
- durs = np.asarray(durs, dtype=np.float32)
118
- slurs = np.asarray(slurs, dtype=np.int32)
119
- gtdurs = np.asarray(gtdurs, dtype=np.float32)
120
-
121
- acc_duration = np.cumsum(gtdurs)
122
- acc_duration = np.pad(acc_duration, (1, 0), 'constant', constant_values=(0,))
123
- acc_duration_frames = np.ceil(acc_duration / (self.hps.data.hop_length / self.hps.data.sampling_rate))
124
- gtdurs = acc_duration_frames[1:] - acc_duration_frames[:-1]
125
-
126
- # new_phos = []
127
- # new_gtdurs=[]
128
- # for ph, dur in zip(phos, gtdurs):
129
- # for i in range(int(dur)):
130
- # new_phos.append(ph)
131
- # new_gtdurs.append(1)
132
-
133
- phos = torch.LongTensor(phos)
134
- pitchs = torch.LongTensor(pitchs)
135
- durs = torch.FloatTensor(durs)
136
- slurs = torch.LongTensor(slurs)
137
- gtdurs = torch.LongTensor(gtdurs)
138
- return phos, pitchs, durs, slurs, gtdurs
139
-
140
- def __getitem__(self, index):
141
- wav_path = self.fileid_list[index]
142
-
143
- spk = wav_path.split('/')[-2]
144
- spkid = self.spk2id[spk]
145
-
146
- wav = load_wav(wav_path,
147
- raw_sr=self.hparams.data.sampling_rate,
148
- target_sr=self.hparams.data.sampling_rate,
149
- win_size=self.hparams.data.win_size,
150
- hop_size=self.hparams.data.hop_length)
151
-
152
- mel_path = wav_path + ".mel.npy"
153
- if not os.path.exists(mel_path):
154
- mel = audio.melspectrogram(wav, self.hparams.data).astype(np.float32).T
155
- np.save(mel_path, mel)
156
- else:
157
- mel = np.load(mel_path)
158
-
159
- if mel.shape[0] < 30:
160
- print("skip short audio:", self.fileid_list[index])
161
- return None
162
- assert mel.shape[1] == 80
163
- mel = torch.FloatTensor(mel).transpose(0, 1)
164
-
165
- f0_path = wav_path + ".f0.npy"
166
- f0 = np.load(f0_path)
167
- assert abs(f0.shape[0]-mel.shape[1]) < 2, (f0.shape ,mel.shape)
168
- sum_dur = min(f0.shape[0], mel.shape[1])
169
- f0 = f0[:sum_dur]
170
- mel = mel[:, :sum_dur]
171
-
172
- f0, uv = self.interpolate_f0(f0)
173
- f0 = f0.reshape([-1])
174
- f0 = torch.FloatTensor(f0).reshape([1, -1])
175
-
176
- uv = uv.reshape([-1])
177
- uv = torch.FloatTensor(uv).reshape([1, -1])
178
-
179
- wav = wav.reshape(-1)
180
- if (wav.shape[0] != sum_dur * self.hparams.data.hop_length):
181
- if (abs(wav.shape[0] - sum_dur * self.hparams.data.hop_length) > 3 * self.hparams.data.hop_length):
182
- print("dataset error wav : ", wav.shape, sum_dur)
183
- return None
184
- if (wav.shape[0] > sum_dur * self.hparams.data.hop_length):
185
- wav = wav[:sum_dur * self.hparams.data.hop_length]
186
- else:
187
- wav = np.concatenate([wav, np.zeros([sum_dur * self.hparams.data.hop_length - wav.shape[0]])], axis=0)
188
- wav = torch.FloatTensor(wav).reshape([1, -1])
189
-
190
- c_path = wav_path + ".soft.pt"
191
- c = torch.load(c_path)
192
- c = utils.repeat_expand_2d(c.squeeze(0), sum_dur)
193
-
194
- assert f0.shape[1] == mel.shape[1]
195
-
196
- if mel.shape[1] > 800:
197
- start = random.randint(0, mel.shape[1]-800)
198
- end = start + 790
199
- mel = mel[:, start:end]
200
- f0 = f0[:, start:end]
201
- uv = uv[:, start:end]
202
- c = c[:, start:end]
203
- wav = wav[:, start*self.hparams.data.hop_length:end*self.hparams.data.hop_length]
204
- return c, mel, f0, wav, spkid, uv
205
-
206
-
207
- class SingCollate():
208
-
209
- def __init__(self, hparams):
210
- self.hparams = hparams
211
- self.mel_dim = self.hparams.data.acoustic_dim
212
-
213
- def __call__(self, batch):
214
- batch = [b for b in batch if b is not None]
215
-
216
- input_lengths, ids_sorted_decreasing = torch.sort(
217
- torch.LongTensor([len(x[0]) for x in batch]),
218
- dim=0, descending=True)
219
-
220
- max_c_len = max([x[0].size(1) for x in batch])
221
- max_mel_len = max([x[1].size(1) for x in batch])
222
- max_f0_len = max([x[2].size(1) for x in batch])
223
- max_wav_len = max([x[3].size(1) for x in batch])
224
-
225
- c_lengths = torch.LongTensor(len(batch))
226
- mel_lengths = torch.LongTensor(len(batch))
227
- f0_lengths = torch.LongTensor(len(batch))
228
- wav_lengths = torch.LongTensor(len(batch))
229
-
230
- c_padded = torch.FloatTensor(len(batch), self.hparams.data.c_dim, max_mel_len)
231
- mel_padded = torch.FloatTensor(len(batch), self.hparams.data.acoustic_dim, max_mel_len)
232
- f0_padded = torch.FloatTensor(len(batch), 1, max_f0_len)
233
- uv_padded = torch.FloatTensor(len(batch), 1, max_f0_len)
234
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
235
- spkids = torch.LongTensor(len(batch))
236
-
237
- c_padded.zero_()
238
- mel_padded.zero_()
239
- f0_padded.zero_()
240
- uv_padded.zero_()
241
- wav_padded.zero_()
242
-
243
- for i in range(len(ids_sorted_decreasing)):
244
- row = batch[ids_sorted_decreasing[i]]
245
-
246
- c = row[0]
247
- c_padded[i, :, :c.size(1)] = c
248
- c_lengths[i] = c.size(1)
249
-
250
- mel = row[1]
251
- mel_padded[i, :, :mel.size(1)] = mel
252
- mel_lengths[i] = mel.size(1)
253
-
254
- f0 = row[2]
255
- f0_padded[i, :, :f0.size(1)] = f0
256
- f0_lengths[i] = f0.size(1)
257
-
258
- wav = row[3]
259
- wav_padded[i, :, :wav.size(1)] = wav
260
- wav_lengths[i] = wav.size(1)
261
-
262
- spkids[i] = row[4]
263
-
264
- uv = row[5]
265
- uv_padded[i, :, :uv.size(1)] = uv
266
-
267
-
268
- data_dict = {}
269
-
270
- data_dict["c"] = c_padded
271
- data_dict["mel"] = mel_padded
272
- data_dict["f0"] = f0_padded
273
- data_dict["uv"] = uv_padded
274
- data_dict["wav"] = wav_padded
275
-
276
- data_dict["c_lengths"] = c_lengths
277
- data_dict["mel_lengths"] = mel_lengths
278
- data_dict["f0_lengths"] = f0_lengths
279
- data_dict["wav_lengths"] = wav_lengths
280
- data_dict["spkid"] = spkids
281
-
282
- return data_dict
283
-
284
-
285
- class DatasetConstructor():
286
-
287
- def __init__(self, hparams, num_replicas=1, rank=1):
288
- self.hparams = hparams
289
- self.num_replicas = num_replicas
290
- self.rank = rank
291
- self.dataset_function = {"SingDataset": SingDataset}
292
- self.collate_function = {"SingCollate": SingCollate}
293
- self._get_components()
294
-
295
- def _get_components(self):
296
- self._init_datasets()
297
- self._init_collate()
298
- self._init_data_loaders()
299
-
300
- def _init_datasets(self):
301
- self._train_dataset = self.dataset_function[self.hparams.data.dataset_type](self.hparams,
302
- self.hparams.data.data_dir,
303
- self.hparams.data.training_filelist)
304
- self._valid_dataset = self.dataset_function[self.hparams.data.dataset_type](self.hparams,
305
- self.hparams.data.data_dir,
306
- self.hparams.data.validation_filelist)
307
-
308
- def _init_collate(self):
309
- self._collate_fn = self.collate_function[self.hparams.data.collate_type](self.hparams)
310
-
311
- def _init_data_loaders(self):
312
- train_sampler = torch.utils.data.distributed.DistributedSampler(self._train_dataset,
313
- num_replicas=self.num_replicas, rank=self.rank,
314
- shuffle=True)
315
-
316
- self.train_loader = DataLoader(self._train_dataset, num_workers=4, shuffle=False,
317
- batch_size=self.hparams.train.batch_size, pin_memory=True,
318
- drop_last=True, collate_fn=self._collate_fn, sampler=train_sampler)
319
-
320
- self.valid_loader = DataLoader(self._valid_dataset, num_workers=1, shuffle=False,
321
- batch_size=1, pin_memory=True,
322
- drop_last=True, collate_fn=self._collate_fn)
323
-
324
- def get_train_loader(self):
325
- return self.train_loader
326
-
327
- def get_valid_loader(self):
328
- return self.valid_loader
329
-
 
 
 
 
filelists/test.txt DELETED
@@ -1,4 +0,0 @@
- ./dataset/44k/taffy/000562.wav
- ./dataset/44k/nyaru/000011.wav
- ./dataset/44k/nyaru/000008.wav
- ./dataset/44k/taffy/000563.wav
 
filelists/train.txt DELETED
@@ -1,15 +0,0 @@
- ./dataset/44k/taffy/000549.wav
- ./dataset/44k/nyaru/000004.wav
- ./dataset/44k/nyaru/000006.wav
- ./dataset/44k/taffy/000551.wav
- ./dataset/44k/nyaru/000009.wav
- ./dataset/44k/taffy/000561.wav
- ./dataset/44k/nyaru/000001.wav
- ./dataset/44k/taffy/000553.wav
- ./dataset/44k/nyaru/000002.wav
- ./dataset/44k/taffy/000560.wav
- ./dataset/44k/taffy/000557.wav
- ./dataset/44k/nyaru/000005.wav
- ./dataset/44k/taffy/000554.wav
- ./dataset/44k/taffy/000550.wav
- ./dataset/44k/taffy/000559.wav
 
filelists/val.txt DELETED
@@ -1,4 +0,0 @@
- ./dataset/44k/nyaru/000003.wav
- ./dataset/44k/nyaru/000007.wav
- ./dataset/44k/taffy/000558.wav
- ./dataset/44k/taffy/000556.wav
 
hubert/__init__.py DELETED
File without changes
hubert/hubert_model.py DELETED
@@ -1,222 +0,0 @@
1
- import copy
2
- import random
3
- from typing import Optional, Tuple
4
-
5
- import torch
6
- import torch.nn as nn
7
- import torch.nn.functional as t_func
8
- from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
9
-
10
-
11
- class Hubert(nn.Module):
12
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
13
- super().__init__()
14
- self._mask = mask
15
- self.feature_extractor = FeatureExtractor()
16
- self.feature_projection = FeatureProjection()
17
- self.positional_embedding = PositionalConvEmbedding()
18
- self.norm = nn.LayerNorm(768)
19
- self.dropout = nn.Dropout(0.1)
20
- self.encoder = TransformerEncoder(
21
- nn.TransformerEncoderLayer(
22
- 768, 12, 3072, activation="gelu", batch_first=True
23
- ),
24
- 12,
25
- )
26
- self.proj = nn.Linear(768, 256)
27
-
28
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
29
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
30
-
31
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
32
- mask = None
33
- if self.training and self._mask:
34
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
35
- x[mask] = self.masked_spec_embed.to(x.dtype)
36
- return x, mask
37
-
38
- def encode(
39
- self, x: torch.Tensor, layer: Optional[int] = None
40
- ) -> Tuple[torch.Tensor, torch.Tensor]:
41
- x = self.feature_extractor(x)
42
- x = self.feature_projection(x.transpose(1, 2))
43
- x, mask = self.mask(x)
44
- x = x + self.positional_embedding(x)
45
- x = self.dropout(self.norm(x))
46
- x = self.encoder(x, output_layer=layer)
47
- return x, mask
48
-
49
- def logits(self, x: torch.Tensor) -> torch.Tensor:
50
- logits = torch.cosine_similarity(
51
- x.unsqueeze(2),
52
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
53
- dim=-1,
54
- )
55
- return logits / 0.1
56
-
57
- def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
58
- x, mask = self.encode(x)
59
- x = self.proj(x)
60
- logits = self.logits(x)
61
- return logits, mask
62
-
63
-
64
- class HubertSoft(Hubert):
65
- def __init__(self):
66
- super().__init__()
67
-
68
- @torch.inference_mode()
69
- def units(self, wav: torch.Tensor) -> torch.Tensor:
70
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
71
- x, _ = self.encode(wav)
72
- return self.proj(x)
73
-
74
-
75
- class FeatureExtractor(nn.Module):
76
- def __init__(self):
77
- super().__init__()
78
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
79
- self.norm0 = nn.GroupNorm(512, 512)
80
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
81
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
82
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
83
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
84
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
85
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
86
-
87
- def forward(self, x: torch.Tensor) -> torch.Tensor:
88
- x = t_func.gelu(self.norm0(self.conv0(x)))
89
- x = t_func.gelu(self.conv1(x))
90
- x = t_func.gelu(self.conv2(x))
91
- x = t_func.gelu(self.conv3(x))
92
- x = t_func.gelu(self.conv4(x))
93
- x = t_func.gelu(self.conv5(x))
94
- x = t_func.gelu(self.conv6(x))
95
- return x
96
-
97
-
98
- class FeatureProjection(nn.Module):
99
- def __init__(self):
100
- super().__init__()
101
- self.norm = nn.LayerNorm(512)
102
- self.projection = nn.Linear(512, 768)
103
- self.dropout = nn.Dropout(0.1)
104
-
105
- def forward(self, x: torch.Tensor) -> torch.Tensor:
106
- x = self.norm(x)
107
- x = self.projection(x)
108
- x = self.dropout(x)
109
- return x
110
-
111
-
112
- class PositionalConvEmbedding(nn.Module):
113
- def __init__(self):
114
- super().__init__()
115
- self.conv = nn.Conv1d(
116
- 768,
117
- 768,
118
- kernel_size=128,
119
- padding=128 // 2,
120
- groups=16,
121
- )
122
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
123
-
124
- def forward(self, x: torch.Tensor) -> torch.Tensor:
125
- x = self.conv(x.transpose(1, 2))
126
- x = t_func.gelu(x[:, :, :-1])
127
- return x.transpose(1, 2)
128
-
129
-
130
- class TransformerEncoder(nn.Module):
131
- def __init__(
132
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
133
- ) -> None:
134
- super(TransformerEncoder, self).__init__()
135
- self.layers = nn.ModuleList(
136
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
137
- )
138
- self.num_layers = num_layers
139
-
140
- def forward(
141
- self,
142
- src: torch.Tensor,
143
- mask: torch.Tensor = None,
144
- src_key_padding_mask: torch.Tensor = None,
145
- output_layer: Optional[int] = None,
146
- ) -> torch.Tensor:
147
- output = src
148
- for layer in self.layers[:output_layer]:
149
- output = layer(
150
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
151
- )
152
- return output
153
-
154
-
155
- def _compute_mask(
156
- shape: Tuple[int, int],
157
- mask_prob: float,
158
- mask_length: int,
159
- device: torch.device,
160
- min_masks: int = 0,
161
- ) -> torch.Tensor:
162
- batch_size, sequence_length = shape
163
-
164
- if mask_length < 1:
165
- raise ValueError("`mask_length` has to be bigger than 0.")
166
-
167
- if mask_length > sequence_length:
168
- raise ValueError(
169
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
170
- )
171
-
172
- # compute number of masked spans in batch
173
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
174
- num_masked_spans = max(num_masked_spans, min_masks)
175
-
176
- # make sure num masked indices <= sequence_length
177
- if num_masked_spans * mask_length > sequence_length:
178
- num_masked_spans = sequence_length // mask_length
179
-
180
- # SpecAugment mask to fill
181
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
182
-
183
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
184
- uniform_dist = torch.ones(
185
- (batch_size, sequence_length - (mask_length - 1)), device=device
186
- )
187
-
188
- # get random indices to mask
189
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
190
-
191
- # expand masked indices to masked spans
192
- mask_indices = (
193
- mask_indices.unsqueeze(dim=-1)
194
- .expand((batch_size, num_masked_spans, mask_length))
195
- .reshape(batch_size, num_masked_spans * mask_length)
196
- )
197
- offsets = (
198
- torch.arange(mask_length, device=device)[None, None, :]
199
- .expand((batch_size, num_masked_spans, mask_length))
200
- .reshape(batch_size, num_masked_spans * mask_length)
201
- )
202
- mask_idxs = mask_indices + offsets
203
-
204
- # scatter indices to mask
205
- mask = mask.scatter(1, mask_idxs, True)
206
-
207
- return mask
208
-
209
-
210
- def hubert_soft(
211
- path: str,
212
- ) -> HubertSoft:
213
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
214
- Args:
215
- path (str): path of a pretrained model
216
- """
217
- hubert = HubertSoft()
218
- checkpoint = torch.load(path)
219
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
220
- hubert.load_state_dict(checkpoint)
221
- hubert.eval()
222
- return hubert
 
 
 
 
hubert/hubert_model_onnx.py DELETED
@@ -1,217 +0,0 @@
1
- import copy
2
- import random
3
- from typing import Optional, Tuple
4
-
5
- import torch
6
- import torch.nn as nn
7
- import torch.nn.functional as t_func
8
- from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
9
-
10
-
11
- class Hubert(nn.Module):
12
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
13
- super().__init__()
14
- self._mask = mask
15
- self.feature_extractor = FeatureExtractor()
16
- self.feature_projection = FeatureProjection()
17
- self.positional_embedding = PositionalConvEmbedding()
18
- self.norm = nn.LayerNorm(768)
19
- self.dropout = nn.Dropout(0.1)
20
- self.encoder = TransformerEncoder(
21
- nn.TransformerEncoderLayer(
22
- 768, 12, 3072, activation="gelu", batch_first=True
23
- ),
24
- 12,
25
- )
26
- self.proj = nn.Linear(768, 256)
27
-
28
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
29
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
30
-
31
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
32
- mask = None
33
- if self.training and self._mask:
34
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
35
- x[mask] = self.masked_spec_embed.to(x.dtype)
36
- return x, mask
37
-
38
- def encode(
39
- self, x: torch.Tensor, layer: Optional[int] = None
40
- ) -> Tuple[torch.Tensor, torch.Tensor]:
41
- x = self.feature_extractor(x)
42
- x = self.feature_projection(x.transpose(1, 2))
43
- x, mask = self.mask(x)
44
- x = x + self.positional_embedding(x)
45
- x = self.dropout(self.norm(x))
46
- x = self.encoder(x, output_layer=layer)
47
- return x, mask
48
-
49
- def logits(self, x: torch.Tensor) -> torch.Tensor:
50
- logits = torch.cosine_similarity(
51
- x.unsqueeze(2),
52
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
53
- dim=-1,
54
- )
55
- return logits / 0.1
56
-
57
-
58
- class HubertSoft(Hubert):
59
- def __init__(self):
60
- super().__init__()
61
-
62
- def units(self, wav: torch.Tensor) -> torch.Tensor:
63
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
64
- x, _ = self.encode(wav)
65
- return self.proj(x)
66
-
67
- def forward(self, x):
68
- return self.units(x)
69
-
70
- class FeatureExtractor(nn.Module):
71
- def __init__(self):
72
- super().__init__()
73
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
74
- self.norm0 = nn.GroupNorm(512, 512)
75
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
76
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
77
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
78
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
79
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
80
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
81
-
82
- def forward(self, x: torch.Tensor) -> torch.Tensor:
83
- x = t_func.gelu(self.norm0(self.conv0(x)))
84
- x = t_func.gelu(self.conv1(x))
85
- x = t_func.gelu(self.conv2(x))
86
- x = t_func.gelu(self.conv3(x))
87
- x = t_func.gelu(self.conv4(x))
88
- x = t_func.gelu(self.conv5(x))
89
- x = t_func.gelu(self.conv6(x))
90
- return x
91
-
92
-
93
- class FeatureProjection(nn.Module):
94
- def __init__(self):
95
- super().__init__()
96
- self.norm = nn.LayerNorm(512)
97
- self.projection = nn.Linear(512, 768)
98
- self.dropout = nn.Dropout(0.1)
99
-
100
- def forward(self, x: torch.Tensor) -> torch.Tensor:
101
- x = self.norm(x)
102
- x = self.projection(x)
103
- x = self.dropout(x)
104
- return x
105
-
106
-
107
- class PositionalConvEmbedding(nn.Module):
108
- def __init__(self):
109
- super().__init__()
110
- self.conv = nn.Conv1d(
111
- 768,
112
- 768,
113
- kernel_size=128,
114
- padding=128 // 2,
115
- groups=16,
116
- )
117
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
118
-
119
- def forward(self, x: torch.Tensor) -> torch.Tensor:
120
- x = self.conv(x.transpose(1, 2))
121
- x = t_func.gelu(x[:, :, :-1])
122
- return x.transpose(1, 2)
123
-
124
-
125
- class TransformerEncoder(nn.Module):
126
- def __init__(
127
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
128
- ) -> None:
129
- super(TransformerEncoder, self).__init__()
130
- self.layers = nn.ModuleList(
131
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
132
- )
133
- self.num_layers = num_layers
134
-
135
- def forward(
136
- self,
137
- src: torch.Tensor,
138
- mask: torch.Tensor = None,
139
- src_key_padding_mask: torch.Tensor = None,
140
- output_layer: Optional[int] = None,
141
- ) -> torch.Tensor:
142
- output = src
143
- for layer in self.layers[:output_layer]:
144
- output = layer(
145
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
146
- )
147
- return output
148
-
149
-
150
- def _compute_mask(
151
- shape: Tuple[int, int],
152
- mask_prob: float,
153
- mask_length: int,
154
- device: torch.device,
155
- min_masks: int = 0,
156
- ) -> torch.Tensor:
157
- batch_size, sequence_length = shape
158
-
159
- if mask_length < 1:
160
- raise ValueError("`mask_length` has to be bigger than 0.")
161
-
162
- if mask_length > sequence_length:
163
- raise ValueError(
164
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
165
- )
166
-
167
- # compute number of masked spans in batch
168
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
169
- num_masked_spans = max(num_masked_spans, min_masks)
170
-
171
- # make sure num masked indices <= sequence_length
172
- if num_masked_spans * mask_length > sequence_length:
173
- num_masked_spans = sequence_length // mask_length
174
-
175
- # SpecAugment mask to fill
176
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
177
-
178
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
179
- uniform_dist = torch.ones(
180
- (batch_size, sequence_length - (mask_length - 1)), device=device
181
- )
182
-
183
- # get random indices to mask
184
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
185
-
186
- # expand masked indices to masked spans
187
- mask_indices = (
188
- mask_indices.unsqueeze(dim=-1)
189
- .expand((batch_size, num_masked_spans, mask_length))
190
- .reshape(batch_size, num_masked_spans * mask_length)
191
- )
192
- offsets = (
193
- torch.arange(mask_length, device=device)[None, None, :]
194
- .expand((batch_size, num_masked_spans, mask_length))
195
- .reshape(batch_size, num_masked_spans * mask_length)
196
- )
197
- mask_idxs = mask_indices + offsets
198
-
199
- # scatter indices to mask
200
- mask = mask.scatter(1, mask_idxs, True)
201
-
202
- return mask
203
-
204
-
205
- def hubert_soft(
206
- path: str,
207
- ) -> HubertSoft:
208
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
209
- Args:
210
- path (str): path of a pretrained model
211
- """
212
- hubert = HubertSoft()
213
- checkpoint = torch.load(path)
214
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
215
- hubert.load_state_dict(checkpoint)
216
- hubert.eval()
217
- return hubert
 
 
 
 
hubert/put_hubert_ckpt_here DELETED
File without changes
inference/__init__.py DELETED
File without changes
inference/chunks_temp.json DELETED
@@ -1 +0,0 @@
- {"info": "temp_dict"}
 
inference/infer_tool.py DELETED
@@ -1,243 +0,0 @@
1
- import hashlib
2
- import io
3
- import json
4
- import logging
5
- import os
6
- import time
7
- from pathlib import Path
8
- from inference import slicer
9
-
10
- import librosa
11
- import numpy as np
12
- # import onnxruntime
13
- import parselmouth
14
- import soundfile
15
- import torch
16
- import torchaudio
17
-
18
- import cluster
19
- from hubert import hubert_model
20
- import utils
21
- from models import SynthesizerTrn
22
-
23
- logging.getLogger('matplotlib').setLevel(logging.WARNING)
24
-
25
-
26
- def read_temp(file_name):
27
- if not os.path.exists(file_name):
28
- with open(file_name, "w") as f:
29
- f.write(json.dumps({"info": "temp_dict"}))
30
- return {}
31
- else:
32
- try:
33
- with open(file_name, "r") as f:
34
- data = f.read()
35
- data_dict = json.loads(data)
36
- if os.path.getsize(file_name) > 50 * 1024 * 1024:
37
- f_name = file_name.replace("\\", "/").split("/")[-1]
38
- print(f"clean {f_name}")
39
- for wav_hash in list(data_dict.keys()):
40
- if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
41
- del data_dict[wav_hash]
42
- except Exception as e:
43
- print(e)
44
- print(f"{file_name} error,auto rebuild file")
45
- data_dict = {"info": "temp_dict"}
46
- return data_dict
47
-
48
-
49
- def write_temp(file_name, data):
50
- with open(file_name, "w") as f:
51
- f.write(json.dumps(data))
52
-
53
-
54
- def timeit(func):
55
- def run(*args, **kwargs):
56
- t = time.time()
57
- res = func(*args, **kwargs)
58
- print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t))
59
- return res
60
-
61
- return run
62
-
63
-
64
- def format_wav(audio_path):
65
- if Path(audio_path).suffix == '.wav':
66
- return
67
- raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None)
68
- soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)
69
-
70
-
71
- def get_end_file(dir_path, end):
72
- file_lists = []
73
- for root, dirs, files in os.walk(dir_path):
74
- files = [f for f in files if f[0] != '.']
75
- dirs[:] = [d for d in dirs if d[0] != '.']
76
- for f_file in files:
77
- if f_file.endswith(end):
78
- file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
79
- return file_lists
80
-
81
-
82
- def get_md5(content):
83
- return hashlib.new("md5", content).hexdigest()
84
-
85
- def fill_a_to_b(a, b):
86
- if len(a) < len(b):
87
- for _ in range(0, len(b) - len(a)):
88
- a.append(a[0])
89
-
90
- def mkdir(paths: list):
91
- for path in paths:
92
- if not os.path.exists(path):
93
- os.mkdir(path)
94
-
95
- def pad_array(arr, target_length):
96
- current_length = arr.shape[0]
97
- if current_length >= target_length:
98
- return arr
99
- else:
100
- pad_width = target_length - current_length
101
- pad_left = pad_width // 2
102
- pad_right = pad_width - pad_left
103
- padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0))
104
- return padded_arr
105
-
106
-
107
- class Svc(object):
108
- def __init__(self, net_g_path, config_path,
109
- device=None,
110
- cluster_model_path="logs/44k/kmeans_10000.pt"):
111
- self.net_g_path = net_g_path
112
- if device is None:
113
- self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
114
- else:
115
- self.dev = torch.device(device)
116
- self.net_g_ms = None
117
- self.hps_ms = utils.get_hparams_from_file(config_path)
118
- self.target_sample = self.hps_ms.data.sampling_rate
119
- self.hop_size = self.hps_ms.data.hop_length
120
- self.spk2id = self.hps_ms.spk
121
- # 加载hubert
122
- self.hubert_model = utils.get_hubert_model().to(self.dev)
123
- self.load_model()
124
- if os.path.exists(cluster_model_path):
125
- self.cluster_model = cluster.get_cluster_model(cluster_model_path)
126
-
127
- def load_model(self):
128
- # 获取模型配置
129
- self.net_g_ms = SynthesizerTrn(
130
- self.hps_ms
131
- )
132
- _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None)
133
- if "half" in self.net_g_path and torch.cuda.is_available():
134
- _ = self.net_g_ms.half().eval().to(self.dev)
135
- else:
136
- _ = self.net_g_ms.eval().to(self.dev)
137
-
138
-
139
-
140
- def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker):
141
-
142
- wav, sr = librosa.load(in_path, sr=self.target_sample)
143
-
144
- f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size)
145
- f0, uv = utils.interpolate_f0(f0)
146
- f0 = torch.FloatTensor(f0)
147
- uv = torch.FloatTensor(uv)
148
- f0 = f0 * 2 ** (tran / 12)
149
- f0 = f0.unsqueeze(0).to(self.dev)
150
- uv = uv.unsqueeze(0).to(self.dev)
151
-
152
- wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
153
- wav16k = torch.from_numpy(wav16k).to(self.dev)
154
- c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k)
155
- c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
156
-
157
- if cluster_infer_ratio !=0:
158
- cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
159
- cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
160
- c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
161
-
162
- c = c.unsqueeze(0)
163
- return c, f0, uv
164
-
165
- def infer(self, speaker, tran, raw_path,
166
- cluster_infer_ratio=0,
167
- auto_predict_f0=False,
168
- noice_scale=0.4):
169
- speaker_id = self.spk2id[speaker]
170
- sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
171
- c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker)
172
- if "half" in self.net_g_path and torch.cuda.is_available():
173
- c = c.half()
174
- with torch.no_grad():
175
- start = time.time()
176
- audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0][0,0].data.float()
177
- use_time = time.time() - start
178
- print("vits use time:{}".format(use_time))
179
- return audio, audio.shape[-1]
180
-
181
- def slice_inference(self,raw_audio_path, spk, tran, slice_db,cluster_infer_ratio, auto_predict_f0,noice_scale, pad_seconds=0.5):
182
- wav_path = raw_audio_path
183
- chunks = slicer.cut(wav_path, db_thresh=slice_db)
184
- audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
185
-
186
- audio = []
187
- for (slice_tag, data) in audio_data:
188
- print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
189
- # padd
190
- pad_len = int(audio_sr * pad_seconds)
191
- data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
192
- length = int(np.ceil(len(data) / audio_sr * self.target_sample))
193
- raw_path = io.BytesIO()
194
- soundfile.write(raw_path, data, audio_sr, format="wav")
195
- raw_path.seek(0)
196
- if slice_tag:
197
- print('jump empty segment')
198
- _audio = np.zeros(length)
199
- else:
200
- out_audio, out_sr = self.infer(spk, tran, raw_path,
201
- cluster_infer_ratio=cluster_infer_ratio,
202
- auto_predict_f0=auto_predict_f0,
203
- noice_scale=noice_scale
204
- )
205
- _audio = out_audio.cpu().numpy()
206
-
207
- pad_len = int(self.target_sample * pad_seconds)
208
- _audio = _audio[pad_len:-pad_len]
209
- audio.extend(list(_audio))
210
- return np.array(audio)
211
-
212
-
213
- class RealTimeVC:
214
- def __init__(self):
215
- self.last_chunk = None
216
- self.last_o = None
217
- self.chunk_len = 16000 # 区块长度
218
- self.pre_len = 3840 # 交叉淡化长度,640的倍数
219
-
220
- """输入输出都是1维numpy 音频波形数组"""
221
-
222
- def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path):
223
- import maad
224
- audio, sr = torchaudio.load(input_wav_path)
225
- audio = audio.cpu().numpy()[0]
226
- temp_wav = io.BytesIO()
227
- if self.last_chunk is None:
228
- input_wav_path.seek(0)
229
- audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path)
230
- audio = audio.cpu().numpy()
231
- self.last_chunk = audio[-self.pre_len:]
232
- self.last_o = audio
233
- return audio[-self.chunk_len:]
234
- else:
235
- audio = np.concatenate([self.last_chunk, audio])
236
- soundfile.write(temp_wav, audio, sr, format="wav")
237
- temp_wav.seek(0)
238
- audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav)
239
- audio = audio.cpu().numpy()
240
- ret = maad.util.crossfade(self.last_o, audio, self.pre_len)
241
- self.last_chunk = audio[-self.pre_len:]
242
- self.last_o = audio
243
- return ret[self.chunk_len:2 * self.chunk_len]
 
 
 
 
inference/infer_tool_grad.py DELETED
@@ -1,160 +0,0 @@
1
- import hashlib
2
- import json
3
- import logging
4
- import os
5
- import time
6
- from pathlib import Path
7
- import io
8
- import librosa
9
- import maad
10
- import numpy as np
11
- from inference import slicer
12
- import parselmouth
13
- import soundfile
14
- import torch
15
- import torchaudio
16
-
17
- from hubert import hubert_model
18
- import utils
19
- from models import SynthesizerTrn
20
- logging.getLogger('numba').setLevel(logging.WARNING)
21
- logging.getLogger('matplotlib').setLevel(logging.WARNING)
22
-
23
- def resize2d_f0(x, target_len):
24
- source = np.array(x)
25
- source[source < 0.001] = np.nan
26
- target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)),
27
- source)
28
- res = np.nan_to_num(target)
29
- return res
30
-
31
- def get_f0(x, p_len,f0_up_key=0):
32
-
33
- time_step = 160 / 16000 * 1000
34
- f0_min = 50
35
- f0_max = 1100
36
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
37
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
38
-
39
- f0 = parselmouth.Sound(x, 16000).to_pitch_ac(
40
- time_step=time_step / 1000, voicing_threshold=0.6,
41
- pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
42
-
43
- pad_size=(p_len - len(f0) + 1) // 2
44
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
45
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
46
-
47
- f0 *= pow(2, f0_up_key / 12)
48
- f0_mel = 1127 * np.log(1 + f0 / 700)
49
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
50
- f0_mel[f0_mel <= 1] = 1
51
- f0_mel[f0_mel > 255] = 255
52
- f0_coarse = np.rint(f0_mel).astype(np.int)
53
- return f0_coarse, f0
54
-
55
- def clean_pitch(input_pitch):
56
- num_nan = np.sum(input_pitch == 1)
57
- if num_nan / len(input_pitch) > 0.9:
58
- input_pitch[input_pitch != 1] = 1
59
- return input_pitch
60
-
61
-
62
- def plt_pitch(input_pitch):
63
- input_pitch = input_pitch.astype(float)
64
- input_pitch[input_pitch == 1] = np.nan
65
- return input_pitch
66
-
67
-
68
- def f0_to_pitch(ff):
69
- f0_pitch = 69 + 12 * np.log2(ff / 440)
70
- return f0_pitch
71
-
72
-
73
- def fill_a_to_b(a, b):
74
- if len(a) < len(b):
75
- for _ in range(0, len(b) - len(a)):
76
- a.append(a[0])
77
-
78
-
79
- def mkdir(paths: list):
80
- for path in paths:
81
- if not os.path.exists(path):
82
- os.mkdir(path)
83
-
84
-
85
- class VitsSvc(object):
86
- def __init__(self):
87
- self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
88
- self.SVCVITS = None
89
- self.hps = None
90
- self.speakers = None
91
- self.hubert_soft = utils.get_hubert_model()
92
-
93
- def set_device(self, device):
94
- self.device = torch.device(device)
95
- self.hubert_soft.to(self.device)
96
- if self.SVCVITS != None:
97
- self.SVCVITS.to(self.device)
98
-
99
- def loadCheckpoint(self, path):
100
- self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
101
- self.SVCVITS = SynthesizerTrn(
102
- self.hps.data.filter_length // 2 + 1,
103
- self.hps.train.segment_size // self.hps.data.hop_length,
104
- **self.hps.model)
105
- _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None)
106
- _ = self.SVCVITS.eval().to(self.device)
107
- self.speakers = self.hps.spk
108
-
109
- def get_units(self, source, sr):
110
- source = source.unsqueeze(0).to(self.device)
111
- with torch.inference_mode():
112
- units = self.hubert_soft.units(source)
113
- return units
114
-
115
-
116
- def get_unit_pitch(self, in_path, tran):
117
- source, sr = torchaudio.load(in_path)
118
- source = torchaudio.functional.resample(source, sr, 16000)
119
- if len(source.shape) == 2 and source.shape[1] >= 2:
120
- source = torch.mean(source, dim=0).unsqueeze(0)
121
- soft = self.get_units(source, sr).squeeze(0).cpu().numpy()
122
- f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran)
123
- return soft, f0
124
-
125
- def infer(self, speaker_id, tran, raw_path):
126
- speaker_id = self.speakers[speaker_id]
127
- sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0)
128
- soft, pitch = self.get_unit_pitch(raw_path, tran)
129
- f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device)
130
- stn_tst = torch.FloatTensor(soft)
131
- with torch.no_grad():
132
- x_tst = stn_tst.unsqueeze(0).to(self.device)
133
- x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2)
134
- audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float()
135
- return audio, audio.shape[-1]
136
-
137
- def inference(self,srcaudio,chara,tran,slice_db):
138
- sampling_rate, audio = srcaudio
139
- audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
140
- if len(audio.shape) > 1:
141
- audio = librosa.to_mono(audio.transpose(1, 0))
142
- if sampling_rate != 16000:
143
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
144
- soundfile.write("tmpwav.wav", audio, 16000, format="wav")
145
- chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db)
146
- audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks)
147
- audio = []
148
- for (slice_tag, data) in audio_data:
149
- length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate))
150
- raw_path = io.BytesIO()
151
- soundfile.write(raw_path, data, audio_sr, format="wav")
152
- raw_path.seek(0)
153
- if slice_tag:
154
- _audio = np.zeros(length)
155
- else:
156
- out_audio, out_sr = self.infer(chara, tran, raw_path)
157
- _audio = out_audio.cpu().numpy()
158
- audio.extend(list(_audio))
159
- audio = (np.array(audio) * 32768.0).astype('int16')
160
- return (self.hps.data.sampling_rate,audio)
 
 
 
 
inference/slicer.py DELETED
@@ -1,142 +0,0 @@
1
- import librosa
2
- import torch
3
- import torchaudio
4
-
5
-
6
- class Slicer:
7
- def __init__(self,
8
- sr: int,
9
- threshold: float = -40.,
10
- min_length: int = 5000,
11
- min_interval: int = 300,
12
- hop_size: int = 20,
13
- max_sil_kept: int = 5000):
14
- if not min_length >= min_interval >= hop_size:
15
- raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
16
- if not max_sil_kept >= hop_size:
17
- raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
18
- min_interval = sr * min_interval / 1000
19
- self.threshold = 10 ** (threshold / 20.)
20
- self.hop_size = round(sr * hop_size / 1000)
21
- self.win_size = min(round(min_interval), 4 * self.hop_size)
22
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
23
- self.min_interval = round(min_interval / self.hop_size)
24
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
25
-
26
- def _apply_slice(self, waveform, begin, end):
27
- if len(waveform.shape) > 1:
28
- return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
29
- else:
30
- return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
31
-
32
- # @timeit
33
- def slice(self, waveform):
34
- if len(waveform.shape) > 1:
35
- samples = librosa.to_mono(waveform)
36
- else:
37
- samples = waveform
38
- if samples.shape[0] <= self.min_length:
39
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
40
- rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
41
- sil_tags = []
42
- silence_start = None
43
- clip_start = 0
44
- for i, rms in enumerate(rms_list):
45
- # Keep looping while frame is silent.
46
- if rms < self.threshold:
47
- # Record start of silent frames.
48
- if silence_start is None:
49
- silence_start = i
50
- continue
51
- # Keep looping while frame is not silent and silence start has not been recorded.
52
- if silence_start is None:
53
- continue
54
- # Clear recorded silence start if interval is not enough or clip is too short
55
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
56
- need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
57
- if not is_leading_silence and not need_slice_middle:
58
- silence_start = None
59
- continue
60
- # Need slicing. Record the range of silent frames to be removed.
61
- if i - silence_start <= self.max_sil_kept:
62
- pos = rms_list[silence_start: i + 1].argmin() + silence_start
63
- if silence_start == 0:
64
- sil_tags.append((0, pos))
65
- else:
66
- sil_tags.append((pos, pos))
67
- clip_start = pos
68
- elif i - silence_start <= self.max_sil_kept * 2:
69
- pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
70
- pos += i - self.max_sil_kept
71
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
72
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
73
- if silence_start == 0:
74
- sil_tags.append((0, pos_r))
75
- clip_start = pos_r
76
- else:
77
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
78
- clip_start = max(pos_r, pos)
79
- else:
80
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
81
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
82
- if silence_start == 0:
83
- sil_tags.append((0, pos_r))
84
- else:
85
- sil_tags.append((pos_l, pos_r))
86
- clip_start = pos_r
87
- silence_start = None
88
- # Deal with trailing silence.
89
- total_frames = rms_list.shape[0]
90
- if silence_start is not None and total_frames - silence_start >= self.min_interval:
91
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
92
- pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
93
- sil_tags.append((pos, total_frames + 1))
94
- # Apply and return slices.
95
- if len(sil_tags) == 0:
96
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
97
- else:
98
- chunks = []
99
-             # The first silent span does not start at the very beginning, so prepend the leading voiced segment
100
- if sil_tags[0][0]:
101
- chunks.append(
102
- {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
103
- for i in range(0, len(sil_tags)):
104
-                 # Mark the voiced segment (skipped for the first tag)
105
- if i:
106
- chunks.append({"slice": False,
107
- "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
108
-                 # Mark every silent segment
109
- chunks.append({"slice": True,
110
- "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
111
-             # The last silent span does not reach the end, so append the trailing voiced segment
112
- if sil_tags[-1][1] * self.hop_size < len(waveform):
113
- chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
114
- chunk_dict = {}
115
- for i in range(len(chunks)):
116
- chunk_dict[str(i)] = chunks[i]
117
- return chunk_dict
118
-
119
-
120
- def cut(audio_path, db_thresh=-30, min_len=5000):
121
- audio, sr = librosa.load(audio_path, sr=None)
122
- slicer = Slicer(
123
- sr=sr,
124
- threshold=db_thresh,
125
- min_length=min_len
126
- )
127
- chunks = slicer.slice(audio)
128
- return chunks
129
-
130
-
131
- def chunks2audio(audio_path, chunks):
132
- chunks = dict(chunks)
133
- audio, sr = torchaudio.load(audio_path)
134
- if len(audio.shape) == 2 and audio.shape[1] >= 2:
135
- audio = torch.mean(audio, dim=0).unsqueeze(0)
136
- audio = audio.cpu().numpy()[0]
137
- result = []
138
- for k, v in chunks.items():
139
- tag = v["split_time"].split(",")
140
- if tag[0] != tag[1]:
141
- result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
142
- return result, sr
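`cut` and `chunks2audio` form a small two-step slicing API: `cut` detects silence on an RMS envelope (the dB threshold is converted internally to a linear amplitude via 10**(dB/20)), and `chunks2audio` materializes the voiced and silent spans. A minimal usage sketch, with the input path purely as an assumption:

```python
from inference import slicer

input_path = "raw/example.wav"  # assumed file name

# Detect silence: threshold in dBFS, minimum clip length in milliseconds.
chunks = slicer.cut(input_path, db_thresh=-40, min_len=5000)

# Materialize the chunks; slice_tag is True for silent spans, which callers
# typically replace with zeros instead of running inference on them.
audio_data, sr = slicer.chunks2audio(input_path, chunks)
for slice_tag, data in audio_data:
    print(slice_tag, round(len(data) / sr, 3), "seconds")
```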
 
inference_main.py DELETED
@@ -1,101 +0,0 @@
1
- import io
2
- import logging
3
- import time
4
- from pathlib import Path
5
-
6
- import librosa
7
- import matplotlib.pyplot as plt
8
- import numpy as np
9
- import soundfile
10
-
11
- from inference import infer_tool
12
- from inference import slicer
13
- from inference.infer_tool import Svc
14
-
15
- logging.getLogger('numba').setLevel(logging.WARNING)
16
- chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
17
-
18
-
19
-
20
- def main():
21
- import argparse
22
-
23
- parser = argparse.ArgumentParser(description='sovits4 inference')
24
-
25
-     # Required arguments
26
-     parser.add_argument('-m', '--model_path', type=str, default="/Volumes/Extend/下载/cvecG_23000.pth", help='Path to the model checkpoint')
27
-     parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='Path to the config file')
28
-     parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='List of wav file names, placed under the raw folder')
29
-     parser.add_argument('-t', '--trans', type=int, nargs='+', default=[-5], help='Pitch shift in semitones, positive or negative')
30
-     parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['yunhao'], help='Target speaker name(s) for synthesis')
31
-
32
-     # Optional arguments
33
- parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False,
34
-                         help='Automatically predict pitch for speech conversion; do not enable this when converting singing voice or it will be badly out of tune')
35
-     parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='Path to the cluster model; any value works if no cluster model was trained')
36
-     parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='Clustering ratio, range 0-1; use 0 if no cluster model was trained')
37
-
38
-     # Arguments that rarely need changing
39
-     parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='Default -40; use -30 for noisy audio, or -50 for dry vocals where breaths should be kept')
40
-     parser.add_argument('-d', '--device', type=str, default=None, help='Inference device; None selects CPU or GPU automatically')
41
-     parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='Noise scale; affects articulation and audio quality, somewhat unpredictable')
42
-     parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='Seconds of silence padding for inference; for unknown reasons the start and end can contain artifacts, which disappear after padding with a short silent segment')
43
-     parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='Output audio format')
44
-
45
- args = parser.parse_args()
46
-
47
- svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path)
48
- infer_tool.mkdir(["raw", "results"])
49
- clean_names = args.clean_names
50
- trans = args.trans
51
- spk_list = args.spk_list
52
- slice_db = args.slice_db
53
- wav_format = args.wav_format
54
- auto_predict_f0 = args.auto_predict_f0
55
- cluster_infer_ratio = args.cluster_infer_ratio
56
- noice_scale = args.noice_scale
57
- pad_seconds = args.pad_seconds
58
-
59
- infer_tool.fill_a_to_b(trans, clean_names)
60
- for clean_name, tran in zip(clean_names, trans):
61
- raw_audio_path = f"raw/{clean_name}"
62
- if "." not in raw_audio_path:
63
- raw_audio_path += ".wav"
64
- infer_tool.format_wav(raw_audio_path)
65
- wav_path = Path(raw_audio_path).with_suffix('.wav')
66
- chunks = slicer.cut(wav_path, db_thresh=slice_db)
67
- audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
68
-
69
- for spk in spk_list:
70
- audio = []
71
- for (slice_tag, data) in audio_data:
72
- print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
73
-
74
- length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
75
- if slice_tag:
76
- print('jump empty segment')
77
- _audio = np.zeros(length)
78
- else:
79
-                     # pad with a short silence on both sides
80
- pad_len = int(audio_sr * pad_seconds)
81
- data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
82
- raw_path = io.BytesIO()
83
- soundfile.write(raw_path, data, audio_sr, format="wav")
84
- raw_path.seek(0)
85
- out_audio, out_sr = svc_model.infer(spk, tran, raw_path,
86
- cluster_infer_ratio=cluster_infer_ratio,
87
- auto_predict_f0=auto_predict_f0,
88
- noice_scale=noice_scale
89
- )
90
- _audio = out_audio.cpu().numpy()
91
- pad_len = int(svc_model.target_sample * pad_seconds)
92
- _audio = _audio[pad_len:-pad_len]
93
-
94
- audio.extend(list(infer_tool.pad_array(_audio, length)))
95
- key = "auto" if auto_predict_f0 else f"{tran}key"
96
- cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
97
- res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
98
- soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format)
99
-
100
- if __name__ == '__main__':
101
- main()
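One detail worth calling out in the loop above: each voiced segment is padded with `pad_seconds` of silence before inference and the same amount is trimmed from the output afterwards, which suppresses the boundary artifacts mentioned in the `--pad_seconds` help text. A stripped-down sketch of that step, where `infer_fn` stands in for the `svc_model.infer(...)` call and is an assumption of this sketch:

```python
import numpy as np

def pad_then_trim(data, audio_sr, infer_fn, target_sample, pad_seconds=0.5):
    # Surround the segment with silence at the input sample rate.
    pad_in = int(audio_sr * pad_seconds)
    padded = np.concatenate([np.zeros(pad_in), data, np.zeros(pad_in)])
    # infer_fn is assumed to return a float waveform at target_sample.
    out = infer_fn(padded)
    # Cut the padding back off at the output sample rate.
    pad_out = int(target_sample * pad_seconds)
    return out[pad_out:-pad_out]
```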
 
hubert/whisper_phone_asr.pth → ioritree--so-vits-svc/text-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7f1d742befacdd04c7d6037ea8ac70c256c5971912289b0bce328684643a3036
3
- size 17406081
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28dd48962d9b619f08007fdcadfaca690f681de63214df88162519d0561cdda6
3
+ size 964
ioritree--so-vits-svc/text-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8110424a72f0708feedcd93766a98adf5b07a62148af65ae22a48b2bae5fee40
3
+ size 1036
ioritree--so-vits-svc/text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8132ac6d4904f836a3e62e00e40a3d4bc3584d4f937fd68894cda432c02753ce
3
+ size 959
logs/44k/put_pretrained_model_here DELETED
File without changes
models.py DELETED
@@ -1,1060 +0,0 @@
1
- import sys
2
- import copy
3
- import math
4
- import torch
5
- from torch import nn
6
- from torch.nn import functional as F
7
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
8
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
9
-
10
-
11
- sys.path.append('../..')
12
- import modules.commons as commons
13
- import modules.modules as modules
14
- import modules.attentions as attentions
15
-
16
- from modules.commons import init_weights, get_padding
17
-
18
- from modules.ddsp import mlp, gru, scale_function, remove_above_nyquist, upsample
19
- from modules.ddsp import harmonic_synth, amp_to_impulse_response, fft_convolve
20
- from modules.ddsp import resample
21
- import utils
22
-
23
- from modules.stft import TorchSTFT
24
-
25
- import torch.distributions as D
26
-
27
- from modules.losses import (
28
- generator_loss,
29
- discriminator_loss,
30
- feature_loss,
31
- kl_loss
32
- )
33
-
34
- LRELU_SLOPE = 0.1
35
-
36
-
37
- class PostF0Decoder(nn.Module):
38
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, spk_channels=0):
39
- super().__init__()
40
-
41
- self.in_channels = in_channels
42
- self.filter_channels = filter_channels
43
- self.kernel_size = kernel_size
44
- self.p_dropout = p_dropout
45
- self.gin_channels = spk_channels
46
-
47
- self.drop = nn.Dropout(p_dropout)
48
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
49
- self.norm_1 = modules.LayerNorm(filter_channels)
50
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
51
- self.norm_2 = modules.LayerNorm(filter_channels)
52
- self.proj = nn.Conv1d(filter_channels, 1, 1)
53
-
54
- if spk_channels != 0:
55
- self.cond = nn.Conv1d(spk_channels, in_channels, 1)
56
-
57
- def forward(self, x, x_mask, g=None):
58
- x = torch.detach(x)
59
- if g is not None:
60
- g = torch.detach(g)
61
- x = x + self.cond(g)
62
- x = self.conv_1(x * x_mask)
63
- x = torch.relu(x)
64
- x = self.norm_1(x)
65
- x = self.drop(x)
66
- x = self.conv_2(x * x_mask)
67
- x = torch.relu(x)
68
- x = self.norm_2(x)
69
- x = self.drop(x)
70
- x = self.proj(x * x_mask)
71
- return x * x_mask
72
-
73
-
74
- class TextEncoder(nn.Module):
75
- def __init__(self,
76
- c_dim,
77
- out_channels,
78
- hidden_channels,
79
- filter_channels,
80
- n_heads,
81
- n_layers,
82
- kernel_size,
83
- p_dropout):
84
- super().__init__()
85
- self.out_channels = out_channels
86
- self.hidden_channels = hidden_channels
87
- self.filter_channels = filter_channels
88
- self.n_heads = n_heads
89
- self.n_layers = n_layers
90
- self.kernel_size = kernel_size
91
- self.p_dropout = p_dropout
92
-
93
- self.pre_net = torch.nn.Linear(c_dim, hidden_channels)
94
-
95
- self.encoder = attentions.Encoder(
96
- hidden_channels,
97
- filter_channels,
98
- n_heads,
99
- n_layers,
100
- kernel_size,
101
- p_dropout)
102
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
103
-
104
- def forward(self, x, x_lengths):
105
- x = x.transpose(1,-1)
106
- x = self.pre_net(x)
107
- x = torch.transpose(x, 1, -1) # [b, h, t]
108
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
109
- x = self.encoder(x * x_mask, x_mask)
110
- x = self.proj(x) * x_mask
111
- return x, x_mask
112
-
113
-
114
- def pad_v2(input_ele, mel_max_length=None):
115
- if mel_max_length:
116
- max_len = mel_max_length
117
- else:
118
- max_len = max([input_ele[i].size(0) for i in range(len(input_ele))])
119
-
120
- out_list = list()
121
- for i, batch in enumerate(input_ele):
122
- if len(batch.shape) == 1:
123
- one_batch_padded = F.pad(
124
- batch, (0, max_len - batch.size(0)), "constant", 0.0
125
- )
126
- elif len(batch.shape) == 2:
127
- one_batch_padded = F.pad(
128
- batch, (0, 0, 0, max_len - batch.size(0)), "constant", 0.0
129
- )
130
- out_list.append(one_batch_padded)
131
- out_padded = torch.stack(out_list)
132
- return out_padded
133
-
134
-
135
- class LengthRegulator(nn.Module):
136
- """ Length Regulator """
137
-
138
- def __init__(self):
139
- super(LengthRegulator, self).__init__()
140
-
141
- def LR(self, x, duration, max_len):
142
- x = torch.transpose(x, 1, 2)
143
- output = list()
144
- mel_len = list()
145
- for batch, expand_target in zip(x, duration):
146
- expanded = self.expand(batch, expand_target)
147
- output.append(expanded)
148
- mel_len.append(expanded.shape[0])
149
-
150
- if max_len is not None:
151
- output = pad_v2(output, max_len)
152
- else:
153
- output = pad_v2(output)
154
- output = torch.transpose(output, 1, 2)
155
- return output, torch.LongTensor(mel_len)
156
-
157
- def expand(self, batch, predicted):
158
- predicted = torch.squeeze(predicted)
159
- out = list()
160
-
161
- for i, vec in enumerate(batch):
162
- expand_size = predicted[i].item()
163
- state_info_index = torch.unsqueeze(torch.arange(0, expand_size), 1).float()
164
- state_info_length = torch.unsqueeze(torch.Tensor([expand_size] * expand_size), 1).float()
165
- state_info = torch.cat([state_info_index, state_info_length], 1).to(vec.device)
166
- new_vec = vec.expand(max(int(expand_size), 0), -1)
167
- new_vec = torch.cat([new_vec, state_info], 1)
168
- out.append(new_vec)
169
- out = torch.cat(out, 0)
170
- return out
171
-
172
- def forward(self, x, duration, max_len):
173
- output, mel_len = self.LR(x, duration, max_len)
174
- return output, mel_len
175
-
176
-
177
- class PriorDecoder(nn.Module):
178
- def __init__(self,
179
- out_bn_channels,
180
- hidden_channels,
181
- filter_channels,
182
- n_heads,
183
- n_layers,
184
- kernel_size,
185
- p_dropout,
186
- n_speakers=0,
187
- spk_channels=0):
188
- super().__init__()
189
- self.out_bn_channels = out_bn_channels
190
- self.hidden_channels = hidden_channels
191
- self.filter_channels = filter_channels
192
- self.n_heads = n_heads
193
- self.n_layers = n_layers
194
- self.kernel_size = kernel_size
195
- self.p_dropout = p_dropout
196
- self.spk_channels = spk_channels
197
-
198
- self.prenet = nn.Conv1d(hidden_channels , hidden_channels, 3, padding=1)
199
- self.decoder = attentions.FFT(
200
- hidden_channels,
201
- filter_channels,
202
- n_heads,
203
- n_layers,
204
- kernel_size,
205
- p_dropout)
206
- self.proj = nn.Conv1d(hidden_channels, out_bn_channels, 1)
207
-
208
- if n_speakers != 0:
209
- self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)
210
-
211
- def forward(self, x, x_lengths, spk_emb=None):
212
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
213
-
214
- x = self.prenet(x) * x_mask
215
-
216
- if (spk_emb is not None):
217
- x = x + self.cond(spk_emb)
218
-
219
- x = self.decoder(x * x_mask, x_mask)
220
-
221
- bn = self.proj(x) * x_mask
222
-
223
- return bn, x_mask
224
-
225
-
226
- class Decoder(nn.Module):
227
- def __init__(self,
228
- out_channels,
229
- hidden_channels,
230
- filter_channels,
231
- n_heads,
232
- n_layers,
233
- kernel_size,
234
- p_dropout,
235
- n_speakers=0,
236
- spk_channels=0,
237
- in_channels=None):
238
- super().__init__()
239
- self.out_channels = out_channels
240
- self.hidden_channels = hidden_channels
241
- self.filter_channels = filter_channels
242
- self.n_heads = n_heads
243
- self.n_layers = n_layers
244
- self.kernel_size = kernel_size
245
- self.p_dropout = p_dropout
246
- self.spk_channels = spk_channels
247
-
248
- self.prenet = nn.Conv1d(in_channels if in_channels is not None else hidden_channels, hidden_channels, 3, padding=1)
249
- self.decoder = attentions.FFT(
250
- hidden_channels,
251
- filter_channels,
252
- n_heads,
253
- n_layers,
254
- kernel_size,
255
- p_dropout)
256
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
257
-
258
- if n_speakers != 0:
259
- self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)
260
-
261
- def forward(self, x, x_lengths, spk_emb=None):
262
- x = torch.detach(x)
263
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
264
-
265
- x = self.prenet(x) * x_mask
266
-
267
- if (spk_emb is not None):
268
- x = x + self.cond(spk_emb)
269
-
270
- x = self.decoder(x * x_mask, x_mask)
271
-
272
- x = self.proj(x) * x_mask
273
-
274
- return x, x_mask
275
-
276
- class F0Decoder(nn.Module):
277
- def __init__(self,
278
- out_channels,
279
- hidden_channels,
280
- filter_channels,
281
- n_heads,
282
- n_layers,
283
- kernel_size,
284
- p_dropout,
285
- n_speakers=0,
286
- spk_channels=0,
287
- in_channels=None):
288
- super().__init__()
289
- self.out_channels = out_channels
290
- self.hidden_channels = hidden_channels
291
- self.filter_channels = filter_channels
292
- self.n_heads = n_heads
293
- self.n_layers = n_layers
294
- self.kernel_size = kernel_size
295
- self.p_dropout = p_dropout
296
- self.spk_channels = spk_channels
297
-
298
- self.prenet = nn.Conv1d(in_channels if in_channels is not None else hidden_channels, hidden_channels, 3, padding=1)
299
- self.decoder = attentions.FFT(
300
- hidden_channels,
301
- filter_channels,
302
- n_heads,
303
- n_layers,
304
- kernel_size,
305
- p_dropout)
306
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
307
- self.f0_prenet = nn.Conv1d(1, hidden_channels , 3, padding=1)
308
-
309
- if n_speakers != 0:
310
- self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)
311
-
312
- def forward(self, x, norm_f0, x_lengths, spk_emb=None):
313
- x = torch.detach(x)
314
- x += self.f0_prenet(norm_f0)
315
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
316
-
317
- x = self.prenet(x) * x_mask
318
-
319
- if (spk_emb is not None):
320
- x = x + self.cond(spk_emb)
321
-
322
- x = self.decoder(x * x_mask, x_mask)
323
-
324
- x = self.proj(x) * x_mask
325
-
326
- return x, x_mask
327
-
328
-
329
- class ConvReluNorm(nn.Module):
330
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
331
- super().__init__()
332
- self.in_channels = in_channels
333
- self.hidden_channels = hidden_channels
334
- self.out_channels = out_channels
335
- self.kernel_size = kernel_size
336
- self.n_layers = n_layers
337
- self.p_dropout = p_dropout
338
-         assert n_layers > 1, "Number of layers should be larger than 1."
339
-
340
- self.conv_layers = nn.ModuleList()
341
- self.norm_layers = nn.ModuleList()
342
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
343
- self.norm_layers.append(LayerNorm(hidden_channels))
344
- self.relu_drop = nn.Sequential(
345
- nn.ReLU(),
346
- nn.Dropout(p_dropout))
347
- for _ in range(n_layers - 1):
348
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
349
- self.norm_layers.append(LayerNorm(hidden_channels))
350
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
351
- self.proj.weight.data.zero_()
352
- self.proj.bias.data.zero_()
353
-
354
- def forward(self, x):
355
- x = self.conv_layers[0](x)
356
- x = self.norm_layers[0](x)
357
- x = self.relu_drop(x)
358
-
359
- for i in range(1, self.n_layers):
360
- x_ = self.conv_layers[i](x)
361
- x_ = self.norm_layers[i](x_)
362
- x_ = self.relu_drop(x_)
363
- x = (x + x_) / 2
364
- x = self.proj(x)
365
- return x
366
-
367
-
368
- class PosteriorEncoder(nn.Module):
369
- def __init__(self,
370
- hps,
371
- in_channels,
372
- out_channels,
373
- hidden_channels,
374
- kernel_size,
375
- dilation_rate,
376
- n_layers):
377
- super().__init__()
378
- self.in_channels = in_channels
379
- self.out_channels = out_channels
380
- self.hidden_channels = hidden_channels
381
- self.kernel_size = kernel_size
382
- self.dilation_rate = dilation_rate
383
- self.n_layers = n_layers
384
-
385
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
386
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, n_speakers=hps.data.n_speakers, spk_channels=hps.model.spk_channels)
387
- # self.enc = ConvReluNorm(hidden_channels,
388
- # hidden_channels,
389
- # hidden_channels,
390
- # kernel_size,
391
- # n_layers,
392
- # 0.1)
393
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
394
-
395
- def forward(self, x, x_lengths, g=None):
396
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
397
- x = self.pre(x) * x_mask
398
- x = self.enc(x, x_mask, g=g)
399
- stats = self.proj(x) * x_mask
400
- return stats, x_mask
401
-
402
-
403
- class ResBlock3(torch.nn.Module):
404
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
405
- super(ResBlock3, self).__init__()
406
- self.convs = nn.ModuleList([
407
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
408
- padding=get_padding(kernel_size, dilation[0])))
409
- ])
410
- self.convs.apply(init_weights)
411
-
412
- def forward(self, x, x_mask=None):
413
- for c in self.convs:
414
- xt = F.leaky_relu(x, LRELU_SLOPE)
415
- if x_mask is not None:
416
- xt = xt * x_mask
417
- xt = c(xt)
418
- x = xt + x
419
- if x_mask is not None:
420
- x = x * x_mask
421
- return x
422
-
423
- def remove_weight_norm(self):
424
- for l in self.convs:
425
- remove_weight_norm(l)
426
-
427
-
428
- class Generator_Harm(torch.nn.Module):
429
- def __init__(self, hps):
430
- super(Generator_Harm, self).__init__()
431
- self.hps = hps
432
-
433
- self.prenet = Conv1d(hps.model.hidden_channels, hps.model.hidden_channels, 3, padding=1)
434
-
435
- self.net = ConvReluNorm(hps.model.hidden_channels,
436
- hps.model.hidden_channels,
437
- hps.model.hidden_channels,
438
- hps.model.kernel_size,
439
- 8,
440
- hps.model.p_dropout)
441
-
442
- # self.rnn = nn.LSTM(input_size=hps.model.hidden_channels,
443
- # hidden_size=hps.model.hidden_channels,
444
- # num_layers=1,
445
- # bias=True,
446
- # batch_first=True,
447
- # dropout=0.5,
448
- # bidirectional=True)
449
- self.postnet = Conv1d(hps.model.hidden_channels, hps.model.n_harmonic + 1, 3, padding=1)
450
-
451
- def forward(self, f0, harm, mask):
452
- pitch = f0.transpose(1, 2)
453
- harm = self.prenet(harm)
454
-
455
- harm = self.net(harm) * mask
456
- # harm = harm.transpose(1, 2)
457
- # harm, (hs, hc) = self.rnn(harm)
458
- # harm = harm.transpose(1, 2)
459
-
460
- harm = self.postnet(harm)
461
- harm = harm.transpose(1, 2)
462
- param = harm
463
-
464
- param = scale_function(param)
465
- total_amp = param[..., :1]
466
- amplitudes = param[..., 1:]
467
- amplitudes = remove_above_nyquist(
468
- amplitudes,
469
- pitch,
470
- self.hps.data.sampling_rate,
471
- )
472
- amplitudes /= amplitudes.sum(-1, keepdim=True)
473
- amplitudes *= total_amp
474
-
475
- amplitudes = upsample(amplitudes, self.hps.data.hop_length)
476
- pitch = upsample(pitch, self.hps.data.hop_length)
477
-
478
- n_harmonic = amplitudes.shape[-1]
479
- omega = torch.cumsum(2 * math.pi * pitch / self.hps.data.sampling_rate, 1)
480
- omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
481
- signal_harmonics = (torch.sin(omegas) * amplitudes)
482
- signal_harmonics = signal_harmonics.transpose(1, 2)
483
- return signal_harmonics
484
-
485
-
486
- class Generator(torch.nn.Module):
487
- def __init__(self, hps, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
488
- upsample_initial_channel, upsample_kernel_sizes, n_speakers=0, spk_channels=0):
489
- super(Generator, self).__init__()
490
- self.num_kernels = len(resblock_kernel_sizes)
491
- self.num_upsamples = len(upsample_rates)
492
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
493
- self.upsample_rates = upsample_rates
494
- self.n_speakers = n_speakers
495
-
496
-         resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
497
-
498
- self.downs = nn.ModuleList()
499
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
500
- i = len(upsample_rates) - 1 - i
501
- u = upsample_rates[i]
502
- k = upsample_kernel_sizes[i]
503
- # print("down: ",upsample_initial_channel//(2**(i+1))," -> ", upsample_initial_channel//(2**i))
504
- self.downs.append(weight_norm(
505
- Conv1d(hps.model.n_harmonic + 2, hps.model.n_harmonic + 2,
506
- k, u, padding=k // 2)))
507
-
508
- self.resblocks_downs = nn.ModuleList()
509
- for i in range(len(self.downs)):
510
- j = len(upsample_rates) - 1 - i
511
- self.resblocks_downs.append(ResBlock3(hps.model.n_harmonic + 2, 3, (1, 3)))
512
-
513
- self.concat_pre = Conv1d(upsample_initial_channel + hps.model.n_harmonic + 2, upsample_initial_channel, 3, 1,
514
- padding=1)
515
- self.concat_conv = nn.ModuleList()
516
- for i in range(len(upsample_rates)):
517
- ch = upsample_initial_channel // (2 ** (i + 1))
518
- self.concat_conv.append(Conv1d(ch + hps.model.n_harmonic + 2, ch, 3, 1, padding=1, bias=False))
519
-
520
- self.ups = nn.ModuleList()
521
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
522
- self.ups.append(weight_norm(
523
- ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
524
- k, u, padding=(k - u) // 2)))
525
-
526
- self.resblocks = nn.ModuleList()
527
- for i in range(len(self.ups)):
528
- ch = upsample_initial_channel // (2 ** (i + 1))
529
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
530
- self.resblocks.append(resblock(ch, k, d))
531
-
532
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
533
- self.ups.apply(init_weights)
534
-
535
- if self.n_speakers != 0:
536
- self.cond = nn.Conv1d(spk_channels, upsample_initial_channel, 1)
537
-
538
- def forward(self, x, ddsp, g=None):
539
-
540
- x = self.conv_pre(x)
541
-
542
- if g is not None:
543
- x = x + self.cond(g)
544
-
545
- se = ddsp
546
- res_features = [se]
547
- for i in range(self.num_upsamples):
548
- in_size = se.size(2)
549
- se = self.downs[i](se)
550
- se = self.resblocks_downs[i](se)
551
- up_rate = self.upsample_rates[self.num_upsamples - 1 - i]
552
- se = se[:, :, : in_size // up_rate]
553
- res_features.append(se)
554
-
555
- x = torch.cat([x, se], 1)
556
- x = self.concat_pre(x)
557
-
558
- for i in range(self.num_upsamples):
559
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
560
- in_size = x.size(2)
561
- x = self.ups[i](x)
562
-             # keep the temporal dimension consistent by dropping the extra samples
563
- x = x[:, :, : in_size * self.upsample_rates[i]]
564
-
565
- x = torch.cat([x, res_features[self.num_upsamples - 1 - i]], 1)
566
- x = self.concat_conv[i](x)
567
-
568
- xs = None
569
- for j in range(self.num_kernels):
570
- if xs is None:
571
- xs = self.resblocks[i * self.num_kernels + j](x)
572
- else:
573
- xs += self.resblocks[i * self.num_kernels + j](x)
574
- x = xs / self.num_kernels
575
-
576
- x = F.leaky_relu(x)
577
- x = self.conv_post(x)
578
- x = torch.tanh(x)
579
-
580
- return x
581
-
582
- def remove_weight_norm(self):
583
- print('Removing weight norm...')
584
- for l in self.ups:
585
- remove_weight_norm(l)
586
- for l in self.resblocks:
587
- l.remove_weight_norm()
588
-
589
-
590
- class Generator_Noise(torch.nn.Module):
591
- def __init__(self, hps):
592
- super(Generator_Noise, self).__init__()
593
- self.hps = hps
594
- self.win_size = hps.data.win_size
595
- self.hop_size = hps.data.hop_length
596
- self.fft_size = hps.data.n_fft
597
- self.istft_pre = Conv1d(hps.model.hidden_channels, hps.model.hidden_channels, 3, padding=1)
598
-
599
- self.net = ConvReluNorm(hps.model.hidden_channels,
600
- hps.model.hidden_channels,
601
- hps.model.hidden_channels,
602
- hps.model.kernel_size,
603
- 8,
604
- hps.model.p_dropout)
605
-
606
- self.istft_amplitude = torch.nn.Conv1d(hps.model.hidden_channels, self.fft_size // 2 + 1, 1, 1)
607
- self.window = torch.hann_window(self.win_size)
608
-
609
- def forward(self, x, mask):
610
- istft_x = x
611
- istft_x = self.istft_pre(istft_x)
612
-
613
- istft_x = self.net(istft_x) * mask
614
-
615
- amp = self.istft_amplitude(istft_x).unsqueeze(-1)
616
- phase = (torch.rand(amp.shape) * 2 * 3.14 - 3.14).to(amp)
617
-
618
- real = amp * torch.cos(phase)
619
- imag = amp * torch.sin(phase)
620
- spec = torch.cat([real, imag], 3)
621
- istft_x = torch.istft(spec, self.fft_size, self.hop_size, self.win_size, self.window.to(amp), True,
622
- length=x.shape[2] * self.hop_size, return_complex=False)
623
-
624
- return istft_x.unsqueeze(1)
625
-
626
-
627
- class LayerNorm(nn.Module):
628
- def __init__(self, channels, eps=1e-5):
629
- super().__init__()
630
- self.channels = channels
631
- self.eps = eps
632
-
633
- self.gamma = nn.Parameter(torch.ones(channels))
634
- self.beta = nn.Parameter(torch.zeros(channels))
635
-
636
- def forward(self, x):
637
- x = x.transpose(1, -1)
638
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
639
- return x.transpose(1, -1)
640
-
641
-
642
- class DiscriminatorP(torch.nn.Module):
643
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
644
- super(DiscriminatorP, self).__init__()
645
- self.period = period
646
- self.use_spectral_norm = use_spectral_norm
647
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
648
- self.convs = nn.ModuleList([
649
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
650
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
651
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
652
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
653
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
654
- ])
655
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
656
-
657
- def forward(self, x):
658
- fmap = []
659
-
660
- # 1d to 2d
661
- b, c, t = x.shape
662
- if t % self.period != 0: # pad first
663
- n_pad = self.period - (t % self.period)
664
- x = F.pad(x, (0, n_pad), "reflect")
665
- t = t + n_pad
666
- x = x.view(b, c, t // self.period, self.period)
667
-
668
- for l in self.convs:
669
- x = l(x)
670
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
671
- fmap.append(x)
672
- x = self.conv_post(x)
673
- fmap.append(x)
674
- x = torch.flatten(x, 1, -1)
675
-
676
- return x, fmap
677
-
678
-
679
- class DiscriminatorS(torch.nn.Module):
680
- def __init__(self, use_spectral_norm=False):
681
- super(DiscriminatorS, self).__init__()
682
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
683
- self.convs = nn.ModuleList([
684
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
685
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
686
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
687
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
688
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
689
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
690
- ])
691
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
692
-
693
- def forward(self, x):
694
- fmap = []
695
-
696
- for l in self.convs:
697
- x = l(x)
698
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
699
- fmap.append(x)
700
- x = self.conv_post(x)
701
- fmap.append(x)
702
- x = torch.flatten(x, 1, -1)
703
-
704
- return x, fmap
705
-
706
-
707
- class MultiFrequencyDiscriminator(nn.Module):
708
- def __init__(self,
709
- hop_lengths=[128, 256, 512],
710
- hidden_channels=[256, 512, 512],
711
- domain='double', mel_scale=True):
712
- super(MultiFrequencyDiscriminator, self).__init__()
713
-
714
- self.stfts = nn.ModuleList([
715
- TorchSTFT(fft_size=x * 4, hop_size=x, win_size=x * 4,
716
- normalized=True, domain=domain, mel_scale=mel_scale)
717
- for x in hop_lengths])
718
-
719
- self.domain = domain
720
- if domain == 'double':
721
- self.discriminators = nn.ModuleList([
722
- BaseFrequenceDiscriminator(2, c)
723
- for x, c in zip(hop_lengths, hidden_channels)])
724
- else:
725
- self.discriminators = nn.ModuleList([
726
- BaseFrequenceDiscriminator(1, c)
727
- for x, c in zip(hop_lengths, hidden_channels)])
728
-
729
- def forward(self, x):
730
- scores, feats = list(), list()
731
- for stft, layer in zip(self.stfts, self.discriminators):
732
- # print(stft)
733
- mag, phase = stft.transform(x.squeeze())
734
- if self.domain == 'double':
735
- mag = torch.stack(torch.chunk(mag, 2, dim=1), dim=1)
736
- else:
737
- mag = mag.unsqueeze(1)
738
-
739
- score, feat = layer(mag)
740
- scores.append(score)
741
- feats.append(feat)
742
- return scores, feats
743
-
744
-
745
- class BaseFrequenceDiscriminator(nn.Module):
746
- def __init__(self, in_channels, hidden_channels=512):
747
- super(BaseFrequenceDiscriminator, self).__init__()
748
-
749
- self.discriminator = nn.ModuleList()
750
- self.discriminator += [
751
- nn.Sequential(
752
- nn.ReflectionPad2d((1, 1, 1, 1)),
753
- nn.utils.weight_norm(nn.Conv2d(
754
- in_channels, hidden_channels // 32,
755
- kernel_size=(3, 3), stride=(1, 1)))
756
- ),
757
- nn.Sequential(
758
- nn.LeakyReLU(0.2, True),
759
- nn.ReflectionPad2d((1, 1, 1, 1)),
760
- nn.utils.weight_norm(nn.Conv2d(
761
- hidden_channels // 32, hidden_channels // 16,
762
- kernel_size=(3, 3), stride=(2, 2)))
763
- ),
764
- nn.Sequential(
765
- nn.LeakyReLU(0.2, True),
766
- nn.ReflectionPad2d((1, 1, 1, 1)),
767
- nn.utils.weight_norm(nn.Conv2d(
768
- hidden_channels // 16, hidden_channels // 8,
769
- kernel_size=(3, 3), stride=(1, 1)))
770
- ),
771
- nn.Sequential(
772
- nn.LeakyReLU(0.2, True),
773
- nn.ReflectionPad2d((1, 1, 1, 1)),
774
- nn.utils.weight_norm(nn.Conv2d(
775
- hidden_channels // 8, hidden_channels // 4,
776
- kernel_size=(3, 3), stride=(2, 2)))
777
- ),
778
- nn.Sequential(
779
- nn.LeakyReLU(0.2, True),
780
- nn.ReflectionPad2d((1, 1, 1, 1)),
781
- nn.utils.weight_norm(nn.Conv2d(
782
- hidden_channels // 4, hidden_channels // 2,
783
- kernel_size=(3, 3), stride=(1, 1)))
784
- ),
785
- nn.Sequential(
786
- nn.LeakyReLU(0.2, True),
787
- nn.ReflectionPad2d((1, 1, 1, 1)),
788
- nn.utils.weight_norm(nn.Conv2d(
789
- hidden_channels // 2, hidden_channels,
790
- kernel_size=(3, 3), stride=(2, 2)))
791
- ),
792
- nn.Sequential(
793
- nn.LeakyReLU(0.2, True),
794
- nn.ReflectionPad2d((1, 1, 1, 1)),
795
- nn.utils.weight_norm(nn.Conv2d(
796
- hidden_channels, 1,
797
- kernel_size=(3, 3), stride=(1, 1)))
798
- )
799
- ]
800
-
801
- def forward(self, x):
802
- hiddens = []
803
- for layer in self.discriminator:
804
- x = layer(x)
805
- hiddens.append(x)
806
- return x, hiddens[-1]
807
-
808
-
809
- class Discriminator(torch.nn.Module):
810
- def __init__(self, hps, use_spectral_norm=False):
811
- super(Discriminator, self).__init__()
812
- periods = [2, 3, 5, 7, 11]
813
-
814
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
815
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
816
- self.discriminators = nn.ModuleList(discs)
817
- # self.disc_multfrequency = MultiFrequencyDiscriminator(hop_lengths=[int(hps.data.sampling_rate * 2.5 / 1000),
818
- # int(hps.data.sampling_rate * 5 / 1000),
819
- # int(hps.data.sampling_rate * 7.5 / 1000),
820
- # int(hps.data.sampling_rate * 10 / 1000),
821
- # int(hps.data.sampling_rate * 12.5 / 1000),
822
- # int(hps.data.sampling_rate * 15 / 1000)],
823
- # hidden_channels=[256, 256, 256, 256, 256])
824
-
825
- def forward(self, y, y_hat):
826
- y_d_rs = []
827
- y_d_gs = []
828
- fmap_rs = []
829
- fmap_gs = []
830
- for i, d in enumerate(self.discriminators):
831
- y_d_r, fmap_r = d(y)
832
- y_d_g, fmap_g = d(y_hat)
833
- y_d_rs.append(y_d_r)
834
- y_d_gs.append(y_d_g)
835
- fmap_rs.append(fmap_r)
836
- fmap_gs.append(fmap_g)
837
- # scores_r, fmaps_r = self.disc_multfrequency(y)
838
- # scores_g, fmaps_g = self.disc_multfrequency(y_hat)
839
- # for i in range(len(scores_r)):
840
- # y_d_rs.append(scores_r[i])
841
- # y_d_gs.append(scores_g[i])
842
- # fmap_rs.append(fmaps_r[i])
843
- # fmap_gs.append(fmaps_g[i])
844
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
845
-
846
-
847
- class SynthesizerTrn(nn.Module):
848
- """
849
- Model
850
- """
851
-
852
- def __init__(self, hps):
853
- super().__init__()
854
- self.hps = hps
855
-
856
- self.text_encoder = TextEncoder(
857
- hps.data.c_dim,
858
- hps.model.prior_hidden_channels,
859
- hps.model.prior_hidden_channels,
860
- hps.model.prior_filter_channels,
861
- hps.model.prior_n_heads,
862
- hps.model.prior_n_layers,
863
- hps.model.prior_kernel_size,
864
- hps.model.prior_p_dropout)
865
-
866
- self.decoder = PriorDecoder(
867
- hps.model.hidden_channels * 2,
868
- hps.model.prior_hidden_channels,
869
- hps.model.prior_filter_channels,
870
- hps.model.prior_n_heads,
871
- hps.model.prior_n_layers,
872
- hps.model.prior_kernel_size,
873
- hps.model.prior_p_dropout,
874
- n_speakers=hps.data.n_speakers,
875
- spk_channels=hps.model.spk_channels
876
- )
877
-
878
- self.f0_decoder = F0Decoder(
879
- 1,
880
- hps.model.prior_hidden_channels,
881
- hps.model.prior_filter_channels,
882
- hps.model.prior_n_heads,
883
- hps.model.prior_n_layers,
884
- hps.model.prior_kernel_size,
885
- hps.model.prior_p_dropout,
886
- n_speakers=hps.data.n_speakers,
887
- spk_channels=hps.model.spk_channels
888
- )
889
-
890
- self.mel_decoder = Decoder(
891
- hps.data.acoustic_dim,
892
- hps.model.prior_hidden_channels,
893
- hps.model.prior_filter_channels,
894
- hps.model.prior_n_heads,
895
- hps.model.prior_n_layers,
896
- hps.model.prior_kernel_size,
897
- hps.model.prior_p_dropout,
898
- n_speakers=hps.data.n_speakers,
899
- spk_channels=hps.model.spk_channels
900
- )
901
-
902
- self.posterior_encoder = PosteriorEncoder(
903
- hps,
904
- hps.data.acoustic_dim,
905
- hps.model.hidden_channels,
906
- hps.model.hidden_channels, 3, 1, 8)
907
-
908
- self.dropout = nn.Dropout(0.2)
909
-
910
- self.LR = LengthRegulator()
911
-
912
- self.dec = Generator(hps,
913
- hps.model.hidden_channels,
914
- hps.model.resblock,
915
- hps.model.resblock_kernel_sizes,
916
- hps.model.resblock_dilation_sizes,
917
- hps.model.upsample_rates,
918
- hps.model.upsample_initial_channel,
919
- hps.model.upsample_kernel_sizes,
920
- n_speakers=hps.data.n_speakers,
921
- spk_channels=hps.model.spk_channels)
922
-
923
- self.dec_harm = Generator_Harm(hps)
924
-
925
- self.dec_noise = Generator_Noise(hps)
926
-
927
- self.f0_prenet = nn.Conv1d(1, hps.model.prior_hidden_channels , 3, padding=1)
928
- self.energy_prenet = nn.Conv1d(1, hps.model.prior_hidden_channels , 3, padding=1)
929
- self.mel_prenet = nn.Conv1d(hps.data.acoustic_dim, hps.model.prior_hidden_channels , 3, padding=1)
930
-
931
- if hps.data.n_speakers > 1:
932
- self.emb_spk = nn.Embedding(hps.data.n_speakers, hps.model.spk_channels)
933
- self.flow = modules.ResidualCouplingBlock(hps.model.prior_hidden_channels, hps.model.hidden_channels, 5, 1, 4,n_speakers=hps.data.n_speakers, gin_channels=hps.model.spk_channels)
934
-
935
- def forward(self, c, c_lengths, F0, uv, mel, bn_lengths, spk_id=None):
936
- if self.hps.data.n_speakers > 0:
937
- g = self.emb_spk(spk_id).unsqueeze(-1) # [b, h, 1]
938
- else:
939
- g = None
940
-
941
- # Encoder
942
- decoder_input, x_mask = self.text_encoder(c, c_lengths)
943
-
944
- LF0 = 2595. * torch.log10(1. + F0 / 700.)
945
- LF0 = LF0 / 500
946
- norm_f0 = utils.normalize_f0(LF0,x_mask, uv.squeeze(1),random_scale=True)
947
- pred_lf0, predict_bn_mask = self.f0_decoder(decoder_input, norm_f0, bn_lengths, spk_emb=g)
948
- # print(pred_lf0)
949
- loss_f0 = F.mse_loss(pred_lf0, LF0)
950
-
951
- # aam
952
- predict_mel, predict_bn_mask = self.mel_decoder(decoder_input + self.f0_prenet(LF0), bn_lengths, spk_emb=g)
953
-
954
- predict_energy = predict_mel.detach().sum(1).unsqueeze(1) / self.hps.data.acoustic_dim
955
-
956
- decoder_input = decoder_input + \
957
- self.f0_prenet(LF0) + \
958
- self.energy_prenet(predict_energy) + \
959
- self.mel_prenet(predict_mel.detach())
960
- decoder_output, predict_bn_mask = self.decoder(decoder_input, bn_lengths, spk_emb=g)
961
-
962
- prior_info = decoder_output
963
- m_p = prior_info[:, :self.hps.model.hidden_channels, :]
964
- logs_p = prior_info[:, self.hps.model.hidden_channels:, :]
965
-
966
- # posterior
967
- posterior, y_mask = self.posterior_encoder(mel, bn_lengths,g=g)
968
-
969
- m_q = posterior[:, :self.hps.model.hidden_channels, :]
970
- logs_q = posterior[:, self.hps.model.hidden_channels:, :]
971
- z = (m_q + torch.randn_like(m_q) * torch.exp(logs_q)) * y_mask
972
- z_p = self.flow(z, y_mask, g=g)
973
-
974
- # kl loss
975
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, y_mask)
976
-
977
- p_z = z
978
- p_z = self.dropout(p_z)
979
-
980
- pitch = upsample(F0.transpose(1, 2), self.hps.data.hop_length)
981
- omega = torch.cumsum(2 * math.pi * pitch / self.hps.data.sampling_rate, 1)
982
- sin = torch.sin(omega).transpose(1, 2)
983
-
984
- # dsp synthesize
985
- noise_x = self.dec_noise(p_z, y_mask)
986
- harm_x = self.dec_harm(F0, p_z, y_mask)
987
-
988
- # dsp waveform
989
- dsp_o = torch.cat([harm_x, noise_x], axis=1)
990
-
991
- decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
992
-
993
- # dsp based HiFiGAN vocoder
994
- x_slice, ids_slice = commons.rand_slice_segments(p_z, bn_lengths,
995
- self.hps.train.segment_size // self.hps.data.hop_length)
996
- F0_slice = commons.slice_segments(F0, ids_slice, self.hps.train.segment_size // self.hps.data.hop_length)
997
- dsp_slice = commons.slice_segments(dsp_o, ids_slice * self.hps.data.hop_length, self.hps.train.segment_size)
998
- condition_slice = commons.slice_segments(decoder_condition, ids_slice * self.hps.data.hop_length,
999
- self.hps.train.segment_size)
1000
- o = self.dec(x_slice, condition_slice.detach(), g=g)
1001
-
1002
- return o, ids_slice, LF0 * predict_bn_mask, dsp_slice.sum(1), loss_kl, \
1003
- predict_mel, predict_bn_mask, pred_lf0, loss_f0, norm_f0
1004
-
1005
- def infer(self, c, g=None, f0=None,uv=None, predict_f0=False, noice_scale=0.3):
1006
- if len(g.shape) == 2:
1007
- g = g.squeeze(0)
1008
- if len(f0.shape) == 2:
1009
- f0 = f0.unsqueeze(0)
1010
- g = self.emb_spk(g).unsqueeze(-1) # [b, h, 1]
1011
-
1012
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
1013
-
1014
- # Encoder
1015
- decoder_input, x_mask = self.text_encoder(c, c_lengths)
1016
- y_lengths = c_lengths
1017
-
1018
- LF0 = 2595. * torch.log10(1. + f0 / 700.)
1019
- LF0 = LF0 / 500
1020
-
1021
- if predict_f0:
1022
- norm_f0 = utils.normalize_f0(LF0, x_mask, uv.squeeze(1))
1023
- pred_lf0, predict_bn_mask = self.f0_decoder(decoder_input, norm_f0, y_lengths, spk_emb=g)
1024
- pred_f0 = 700 * ( torch.pow(10, pred_lf0 * 500 / 2595) - 1)
1025
- f0 = pred_f0
1026
- LF0 = pred_lf0
1027
-
1028
- # aam
1029
- predict_mel, predict_bn_mask = self.mel_decoder(decoder_input + self.f0_prenet(LF0), y_lengths, spk_emb=g)
1030
- predict_energy = predict_mel.sum(1).unsqueeze(1) / self.hps.data.acoustic_dim
1031
-
1032
- decoder_input = decoder_input + \
1033
- self.f0_prenet(LF0) + \
1034
- self.energy_prenet(predict_energy) + \
1035
- self.mel_prenet(predict_mel)
1036
- decoder_output, y_mask = self.decoder(decoder_input, y_lengths, spk_emb=g)
1037
-
1038
- prior_info = decoder_output
1039
-
1040
- m_p = prior_info[:, :self.hps.model.hidden_channels, :]
1041
- logs_p = prior_info[:, self.hps.model.hidden_channels:, :]
1042
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noice_scale
1043
- z = self.flow(z_p, y_mask, g=g, reverse=True)
1044
-
1045
- prior_z = z
1046
-
1047
- noise_x = self.dec_noise(prior_z, y_mask)
1048
-
1049
- harm_x = self.dec_harm(f0, prior_z, y_mask)
1050
-
1051
- pitch = upsample(f0.transpose(1, 2), self.hps.data.hop_length)
1052
- omega = torch.cumsum(2 * math.pi * pitch / self.hps.data.sampling_rate, 1)
1053
- sin = torch.sin(omega).transpose(1, 2)
1054
-
1055
- decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
1056
-
1057
- # dsp based HiFiGAN vocoder
1058
- o = self.dec(prior_z, decoder_condition, g=g)
1059
-
1060
- return o, harm_x.sum(1).unsqueeze(1), noise_x, f0
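Both `forward` and `infer` in `SynthesizerTrn` work on a compressed log-F0: the pitch contour is mapped to the mel scale with 2595 * log10(1 + F0/700) and divided by 500 before it conditions the decoder, and the F0 predictor inverts the same mapping when `predict_f0` is enabled. A small sketch of the transform pair used above:

```python
import torch

def f0_to_lf0(f0: torch.Tensor) -> torch.Tensor:
    # Mel-scale compression of F0 (Hz), scaled down by 500 as in the model.
    return 2595.0 * torch.log10(1.0 + f0 / 700.0) / 500.0

def lf0_to_f0(lf0: torch.Tensor) -> torch.Tensor:
    # Inverse mapping back to Hz, matching the predict_f0 branch of infer().
    return 700.0 * (torch.pow(10.0, lf0 * 500.0 / 2595.0) - 1.0)
```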
 
modules/__init__.py DELETED
File without changes
modules/attentions.py DELETED
@@ -1,349 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import torch
5
- from torch import nn
6
- from torch.nn import functional as F
7
-
8
- import modules.commons as commons
9
- import modules.modules as modules
10
- from modules.modules import LayerNorm
11
-
12
-
13
- class FFT(nn.Module):
14
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
15
- proximal_bias=False, proximal_init=True, **kwargs):
16
- super().__init__()
17
- self.hidden_channels = hidden_channels
18
- self.filter_channels = filter_channels
19
- self.n_heads = n_heads
20
- self.n_layers = n_layers
21
- self.kernel_size = kernel_size
22
- self.p_dropout = p_dropout
23
- self.proximal_bias = proximal_bias
24
- self.proximal_init = proximal_init
25
-
26
- self.drop = nn.Dropout(p_dropout)
27
- self.self_attn_layers = nn.ModuleList()
28
- self.norm_layers_0 = nn.ModuleList()
29
- self.ffn_layers = nn.ModuleList()
30
- self.norm_layers_1 = nn.ModuleList()
31
- for i in range(self.n_layers):
32
- self.self_attn_layers.append(
33
- MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias,
34
- proximal_init=proximal_init))
35
- self.norm_layers_0.append(LayerNorm(hidden_channels))
36
- self.ffn_layers.append(
37
- FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
38
- self.norm_layers_1.append(LayerNorm(hidden_channels))
39
-
40
- def forward(self, x, x_mask):
41
- """
42
- x: decoder input
43
- h: encoder output
44
- """
45
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
46
- x = x * x_mask
47
- for i in range(self.n_layers):
48
- y = self.self_attn_layers[i](x, x, self_attn_mask)
49
- y = self.drop(y)
50
- x = self.norm_layers_0[i](x + y)
51
-
52
- y = self.ffn_layers[i](x, x_mask)
53
- y = self.drop(y)
54
- x = self.norm_layers_1[i](x + y)
55
- x = x * x_mask
56
- return x
57
-
58
-
59
- class Encoder(nn.Module):
60
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
61
- super().__init__()
62
- self.hidden_channels = hidden_channels
63
- self.filter_channels = filter_channels
64
- self.n_heads = n_heads
65
- self.n_layers = n_layers
66
- self.kernel_size = kernel_size
67
- self.p_dropout = p_dropout
68
- self.window_size = window_size
69
-
70
- self.drop = nn.Dropout(p_dropout)
71
- self.attn_layers = nn.ModuleList()
72
- self.norm_layers_1 = nn.ModuleList()
73
- self.ffn_layers = nn.ModuleList()
74
- self.norm_layers_2 = nn.ModuleList()
75
- for i in range(self.n_layers):
76
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
77
- self.norm_layers_1.append(LayerNorm(hidden_channels))
78
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
79
- self.norm_layers_2.append(LayerNorm(hidden_channels))
80
-
81
- def forward(self, x, x_mask):
82
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
83
- x = x * x_mask
84
- for i in range(self.n_layers):
85
- y = self.attn_layers[i](x, x, attn_mask)
86
- y = self.drop(y)
87
- x = self.norm_layers_1[i](x + y)
88
-
89
- y = self.ffn_layers[i](x, x_mask)
90
- y = self.drop(y)
91
- x = self.norm_layers_2[i](x + y)
92
- x = x * x_mask
93
- return x
94
-
95
-
96
- class Decoder(nn.Module):
97
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
98
- super().__init__()
99
- self.hidden_channels = hidden_channels
100
- self.filter_channels = filter_channels
101
- self.n_heads = n_heads
102
- self.n_layers = n_layers
103
- self.kernel_size = kernel_size
104
- self.p_dropout = p_dropout
105
- self.proximal_bias = proximal_bias
106
- self.proximal_init = proximal_init
107
-
108
- self.drop = nn.Dropout(p_dropout)
109
- self.self_attn_layers = nn.ModuleList()
110
- self.norm_layers_0 = nn.ModuleList()
111
- self.encdec_attn_layers = nn.ModuleList()
112
- self.norm_layers_1 = nn.ModuleList()
113
- self.ffn_layers = nn.ModuleList()
114
- self.norm_layers_2 = nn.ModuleList()
115
- for i in range(self.n_layers):
116
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
117
- self.norm_layers_0.append(LayerNorm(hidden_channels))
118
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
119
- self.norm_layers_1.append(LayerNorm(hidden_channels))
120
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
121
- self.norm_layers_2.append(LayerNorm(hidden_channels))
122
-
123
- def forward(self, x, x_mask, h, h_mask):
124
- """
125
- x: decoder input
126
- h: encoder output
127
- """
128
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
129
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
130
- x = x * x_mask
131
- for i in range(self.n_layers):
132
- y = self.self_attn_layers[i](x, x, self_attn_mask)
133
- y = self.drop(y)
134
- x = self.norm_layers_0[i](x + y)
135
-
136
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
137
- y = self.drop(y)
138
- x = self.norm_layers_1[i](x + y)
139
-
140
- y = self.ffn_layers[i](x, x_mask)
141
- y = self.drop(y)
142
- x = self.norm_layers_2[i](x + y)
143
- x = x * x_mask
144
- return x
145
-
146
-
147
- class MultiHeadAttention(nn.Module):
148
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
149
- super().__init__()
150
- assert channels % n_heads == 0
151
-
152
- self.channels = channels
153
- self.out_channels = out_channels
154
- self.n_heads = n_heads
155
- self.p_dropout = p_dropout
156
- self.window_size = window_size
157
- self.heads_share = heads_share
158
- self.block_length = block_length
159
- self.proximal_bias = proximal_bias
160
- self.proximal_init = proximal_init
161
- self.attn = None
162
-
163
- self.k_channels = channels // n_heads
164
- self.conv_q = nn.Conv1d(channels, channels, 1)
165
-         self.conv_k = nn.Conv1d(channels, channels, 1)
-         self.conv_v = nn.Conv1d(channels, channels, 1)
-         self.conv_o = nn.Conv1d(channels, out_channels, 1)
-         self.drop = nn.Dropout(p_dropout)
-
-         if window_size is not None:
-             n_heads_rel = 1 if heads_share else n_heads
-             rel_stddev = self.k_channels**-0.5
-             self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-             self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
-         nn.init.xavier_uniform_(self.conv_q.weight)
-         nn.init.xavier_uniform_(self.conv_k.weight)
-         nn.init.xavier_uniform_(self.conv_v.weight)
-         if proximal_init:
-             with torch.no_grad():
-                 self.conv_k.weight.copy_(self.conv_q.weight)
-                 self.conv_k.bias.copy_(self.conv_q.bias)
-
-     def forward(self, x, c, attn_mask=None):
-         q = self.conv_q(x)
-         k = self.conv_k(c)
-         v = self.conv_v(c)
-
-         x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-         x = self.conv_o(x)
-         return x
-
-     def attention(self, query, key, value, mask=None):
-         # reshape [b, d, t] -> [b, n_h, t, d_k]
-         b, d, t_s, t_t = (*key.size(), query.size(2))
-         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-         if self.window_size is not None:
-             assert t_s == t_t, "Relative attention is only available for self-attention."
-             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-             rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
-             scores_local = self._relative_position_to_absolute_position(rel_logits)
-             scores = scores + scores_local
-         if self.proximal_bias:
-             assert t_s == t_t, "Proximal bias is only available for self-attention."
-             scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-         if mask is not None:
-             scores = scores.masked_fill(mask == 0, -1e4)
-             if self.block_length is not None:
-                 assert t_s == t_t, "Local attention is only available for self-attention."
-                 block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-                 scores = scores.masked_fill(block_mask == 0, -1e4)
-         p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
-         p_attn = self.drop(p_attn)
-         output = torch.matmul(p_attn, value)
-         if self.window_size is not None:
-             relative_weights = self._absolute_position_to_relative_position(p_attn)
-             value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
-             output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
-         output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
-         return output, p_attn
-
-     def _matmul_with_relative_values(self, x, y):
-         """
-         x: [b, h, l, m]
-         y: [h or 1, m, d]
-         ret: [b, h, l, d]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0))
-         return ret
-
-     def _matmul_with_relative_keys(self, x, y):
-         """
-         x: [b, h, l, d]
-         y: [h or 1, m, d]
-         ret: [b, h, l, m]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-         return ret
-
-     def _get_relative_embeddings(self, relative_embeddings, length):
-         max_relative_position = 2 * self.window_size + 1
-         # Pad first before slice to avoid using cond ops.
-         pad_length = max(length - (self.window_size + 1), 0)
-         slice_start_position = max((self.window_size + 1) - length, 0)
-         slice_end_position = slice_start_position + 2 * length - 1
-         if pad_length > 0:
-             padded_relative_embeddings = F.pad(
-                 relative_embeddings,
-                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-         else:
-             padded_relative_embeddings = relative_embeddings
-         used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
-         return used_relative_embeddings
-
-     def _relative_position_to_absolute_position(self, x):
-         """
-         x: [b, h, l, 2*l-1]
-         ret: [b, h, l, l]
-         """
-         batch, heads, length, _ = x.size()
-         # Concat columns of pad to shift from relative to absolute indexing.
-         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-         # Concat extra elements so to add up to shape (len+1, 2*len-1).
-         x_flat = x.view([batch, heads, length * 2 * length])
-         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
-
-         # Reshape and slice out the padded elements.
-         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
-         return x_final
-
-     def _absolute_position_to_relative_position(self, x):
-         """
-         x: [b, h, l, l]
-         ret: [b, h, l, 2*l-1]
-         """
-         batch, heads, length, _ = x.size()
-         # Pad along the column dimension.
-         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
-         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
-         # Add zeros at the beginning that will skew the elements after reshape.
-         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-         return x_final
-
-     def _attention_bias_proximal(self, length):
-         """Bias for self-attention to encourage attention to close positions.
-         Args:
-             length: an integer scalar.
-         Returns:
-             a Tensor with shape [1, 1, length, length]
-         """
-         r = torch.arange(length, dtype=torch.float32)
-         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
- class FFN(nn.Module):
-     def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-         super().__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.activation = activation
-         self.causal = causal
-
-         if causal:
-             self.padding = self._causal_padding
-         else:
-             self.padding = self._same_padding
-
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-         self.drop = nn.Dropout(p_dropout)
-
-     def forward(self, x, x_mask):
-         x = self.conv_1(self.padding(x * x_mask))
-         if self.activation == "gelu":
-             x = x * torch.sigmoid(1.702 * x)
-         else:
-             x = torch.relu(x)
-         x = self.drop(x)
-         x = self.conv_2(self.padding(x * x_mask))
-         return x * x_mask
-
-     def _causal_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = self.kernel_size - 1
-         pad_r = 0
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
-
-     def _same_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = (self.kernel_size - 1) // 2
-         pad_r = self.kernel_size // 2
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
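
For reference, a minimal sketch of running the FFN block deleted above on a masked sequence. This example is not part of the repository, and the import path is an assumption (the file header for this class falls outside this excerpt; it is presumed to be modules/attentions.py):

    import torch
    from modules.attentions import FFN  # assumed module path

    x = torch.randn(2, 192, 100)        # [batch, channels, frames]
    x_mask = torch.ones(2, 1, 100)      # all frames valid
    ffn = FFN(in_channels=192, out_channels=192, filter_channels=768, kernel_size=3)
    y = ffn(x, x_mask)
    print(y.shape)                      # torch.Size([2, 192, 100])
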
modules/audio.py DELETED
@@ -1,99 +0,0 @@
- import numpy as np
- from numpy import linalg as LA
- import librosa
- from scipy.io import wavfile
- import soundfile as sf
- import librosa.filters
-
-
- def load_wav(wav_path, raw_sr, target_sr=16000, win_size=800, hop_size=200):
-     audio = librosa.core.load(wav_path, sr=raw_sr)[0]
-     if raw_sr != target_sr:
-         audio = librosa.core.resample(audio,
-                                       raw_sr,
-                                       target_sr,
-                                       res_type='kaiser_best')
-     target_length = (audio.size // hop_size +
-                      win_size // hop_size) * hop_size
-     pad_len = (target_length - audio.size) // 2
-     if audio.size % 2 == 0:
-         audio = np.pad(audio, (pad_len, pad_len), mode='reflect')
-     else:
-         audio = np.pad(audio, (pad_len, pad_len + 1), mode='reflect')
-     return audio
-
-
- def save_wav(wav, path, sample_rate, norm=False):
-     if norm:
-         wav *= 32767 / max(0.01, np.max(np.abs(wav)))
-         wavfile.write(path, sample_rate, wav.astype(np.int16))
-     else:
-         sf.write(path, wav, sample_rate)
-
-
- _mel_basis = None
- _inv_mel_basis = None
-
-
- def _build_mel_basis(hparams):
-     assert hparams.fmax <= hparams.sampling_rate // 2
-     return librosa.filters.mel(hparams.sampling_rate,
-                                hparams.n_fft,
-                                n_mels=hparams.acoustic_dim,
-                                fmin=hparams.fmin,
-                                fmax=hparams.fmax)
-
-
- def _linear_to_mel(spectogram, hparams):
-     global _mel_basis
-     if _mel_basis is None:
-         _mel_basis = _build_mel_basis(hparams)
-     return np.dot(_mel_basis, spectogram)
-
-
- def _mel_to_linear(mel_spectrogram, hparams):
-     global _inv_mel_basis
-     if _inv_mel_basis is None:
-         _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
-     return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
-
-
- def _stft(y, hparams):
-     return librosa.stft(y=y,
-                         n_fft=hparams.n_fft,
-                         hop_length=hparams.hop_length,
-                         win_length=hparams.win_size)
-
-
- def _amp_to_db(x, hparams):
-     min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
-     return 20 * np.log10(np.maximum(min_level, x))
-
- def _normalize(S, hparams):
-     return hparams.max_abs_value * np.clip(((S - hparams.min_db) /
-                                             (-hparams.min_db)), 0, 1)
-
- def _db_to_amp(x):
-     return np.power(10.0, (x) * 0.05)
-
-
- def _stft(y, hparams):
-     return librosa.stft(y=y,
-                         n_fft=hparams.n_fft,
-                         hop_length=hparams.hop_length,
-                         win_length=hparams.win_size)
-
-
- def _istft(y, hparams):
-     return librosa.istft(y,
-                          hop_length=hparams.hop_length,
-                          win_length=hparams.win_size)
-
-
- def melspectrogram(wav, hparams):
-     D = _stft(wav, hparams)
-     S = _amp_to_db(_linear_to_mel(np.abs(D), hparams),
-                    hparams) - hparams.ref_level_db
-     return _normalize(S, hparams)
-
-
modules/commons.py DELETED
@@ -1,188 +0,0 @@
- import math
- import numpy as np
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- def slice_pitch_segments(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, idx_str:idx_end]
-     return ret
-
- def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size + 1
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
-     return ret, ret_pitch, ids_str
-
- def init_weights(m, mean=0.0, std=0.01):
-     classname = m.__class__.__name__
-     if classname.find("Conv") != -1:
-         m.weight.data.normal_(mean, std)
-
-
- def get_padding(kernel_size, dilation=1):
-     return int((kernel_size*dilation - dilation)/2)
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def intersperse(lst, item):
-     result = [item] * (len(lst) * 2 + 1)
-     result[1::2] = lst
-     return result
-
-
- def kl_divergence(m_p, logs_p, m_q, logs_q):
-     """KL(P||Q)"""
-     kl = (logs_q - logs_p) - 0.5
-     kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
-     return kl
-
-
- def rand_gumbel(shape):
-     """Sample from the Gumbel distribution, protect from overflows."""
-     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-     return -torch.log(-torch.log(uniform_samples))
-
-
- def rand_gumbel_like(x):
-     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-     return g
-
-
- def slice_segments(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, :, idx_str:idx_end]
-     return ret
-
-
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size + 1
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     return ret, ids_str
-
-
- def rand_spec_segments(x, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     return ret, ids_str
-
-
- def get_timing_signal_1d(
-         length, channels, min_timescale=1.0, max_timescale=1.0e4):
-     position = torch.arange(length, dtype=torch.float)
-     num_timescales = channels // 2
-     log_timescale_increment = (
-         math.log(float(max_timescale) / float(min_timescale)) /
-         (num_timescales - 1))
-     inv_timescales = min_timescale * torch.exp(
-         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
-     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-     signal = F.pad(signal, [0, 0, 0, channels % 2])
-     signal = signal.view(1, channels, length)
-     return signal
-
-
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return x + signal.to(dtype=x.dtype, device=x.device)
-
-
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
- def subsequent_mask(length):
-     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-     return mask
-
-
- @torch.jit.script
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-     n_channels_int = n_channels[0]
-     in_act = input_a + input_b
-     t_act = torch.tanh(in_act[:, :n_channels_int, :])
-     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-     acts = t_act * s_act
-     return acts
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def shift_1d(x):
-     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-     return x
-
-
- def sequence_mask(length, max_length=None):
-     if max_length is None:
-         max_length = length.max()
-     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-     return x.unsqueeze(0) < length.unsqueeze(1)
-
-
- def generate_path(duration, mask):
-     """
-     duration: [b, 1, t_x]
-     mask: [b, 1, t_y, t_x]
-     """
-     device = duration.device
-
-     b, _, t_y, t_x = mask.shape
-     cum_duration = torch.cumsum(duration, -1)
-
-     cum_duration_flat = cum_duration.view(b * t_x)
-     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-     path = path.view(b, t_x, t_y)
-     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-     path = path.unsqueeze(1).transpose(2, 3) * mask
-     return path
-
-
- def clip_grad_value_(parameters, clip_value, norm_type=2):
-     if isinstance(parameters, torch.Tensor):
-         parameters = [parameters]
-     parameters = list(filter(lambda p: p.grad is not None, parameters))
-     norm_type = float(norm_type)
-     if clip_value is not None:
-         clip_value = float(clip_value)
-
-     total_norm = 0
-     for p in parameters:
-         param_norm = p.grad.data.norm(norm_type)
-         total_norm += param_norm.item() ** norm_type
-         if clip_value is not None:
-             p.grad.data.clamp_(min=-clip_value, max=clip_value)
-     total_norm = total_norm ** (1. / norm_type)
-     return total_norm
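
For reference, a minimal usage sketch of the masking and slicing helpers deleted above. This example is not part of the repository; it assumes the file was importable as modules.commons before removal:

    import torch
    from modules.commons import sequence_mask, rand_slice_segments  # assumed import path

    lengths = torch.tensor([5, 3])            # two sequences, padded to length 5
    mask = sequence_mask(lengths)             # [2, 5] boolean padding mask
    x = torch.randn(2, 192, 5)                # [batch, channels, frames]
    segments, ids_str = rand_slice_segments(x, lengths, segment_size=2)
    print(mask.shape, segments.shape)         # torch.Size([2, 5]) torch.Size([2, 192, 2])
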
modules/ddsp.py DELETED
@@ -1,189 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from torch.nn import functional as F
4
- import torch.fft as fft
5
- import numpy as np
6
- import librosa as li
7
- import math
8
- from scipy.signal import get_window
9
-
10
- def safe_log(x):
11
- return torch.log(x + 1e-7)
12
-
13
-
14
- @torch.no_grad()
15
- def mean_std_loudness(dataset):
16
- mean = 0
17
- std = 0
18
- n = 0
19
- for _, _, l in dataset:
20
- n += 1
21
- mean += (l.mean().item() - mean) / n
22
- std += (l.std().item() - std) / n
23
- return mean, std
24
-
25
-
26
- def multiscale_fft(signal, scales, overlap):
27
- stfts = []
28
- for s in scales:
29
- S = torch.stft(
30
- signal,
31
- s,
32
- int(s * (1 - overlap)),
33
- s,
34
- torch.hann_window(s).to(signal),
35
- True,
36
- normalized=True,
37
- return_complex=True,
38
- ).abs()
39
- stfts.append(S)
40
- return stfts
41
-
42
-
43
- def resample(x, factor: int):
44
- batch, frame, channel = x.shape
45
- x = x.permute(0, 2, 1).reshape(batch * channel, 1, frame)
46
-
47
- window = torch.hann_window(
48
- factor * 2,
49
- dtype=x.dtype,
50
- device=x.device,
51
- ).reshape(1, 1, -1)
52
- y = torch.zeros(x.shape[0], x.shape[1], factor * x.shape[2]).to(x)
53
- y[..., ::factor] = x
54
- y[..., -1:] = x[..., -1:]
55
- y = torch.nn.functional.pad(y, [factor, factor])
56
- y = torch.nn.functional.conv1d(y, window)[..., :-1]
57
-
58
- y = y.reshape(batch, channel, factor * frame).permute(0, 2, 1)
59
-
60
- return y
61
-
62
-
63
- def upsample(signal, factor):
64
- signal = signal.permute(0, 2, 1)
65
- signal = nn.functional.interpolate(signal, size=signal.shape[-1] * factor)
66
- return signal.permute(0, 2, 1)
67
-
68
-
69
- def remove_above_nyquist(amplitudes, pitch, sampling_rate):
70
- n_harm = amplitudes.shape[-1]
71
- pitches = pitch * torch.arange(1, n_harm + 1).to(pitch)
72
- aa = (pitches < sampling_rate / 2).float() + 1e-4
73
- return amplitudes * aa
74
-
75
-
76
- def scale_function(x):
77
- return 2 * torch.sigmoid(x)**(math.log(10)) + 1e-7
78
-
79
-
80
- def extract_loudness(signal, sampling_rate, block_size, n_fft=2048):
81
- S = li.stft(
82
- signal,
83
- n_fft=n_fft,
84
- hop_length=block_size,
85
- win_length=n_fft,
86
- center=True,
87
- )
88
- S = np.log(abs(S) + 1e-7)
89
- f = li.fft_frequencies(sampling_rate, n_fft)
90
- a_weight = li.A_weighting(f)
91
-
92
- S = S + a_weight.reshape(-1, 1)
93
-
94
- S = np.mean(S, 0)[..., :-1]
95
-
96
- return S
97
-
98
-
99
- def extract_pitch(signal, sampling_rate, block_size):
100
- length = signal.shape[-1] // block_size
101
- f0 = crepe.predict(
102
- signal,
103
- sampling_rate,
104
- step_size=int(1000 * block_size / sampling_rate),
105
- verbose=1,
106
- center=True,
107
- viterbi=True,
108
- )
109
- f0 = f0[1].reshape(-1)[:-1]
110
-
111
- if f0.shape[-1] != length:
112
- f0 = np.interp(
113
- np.linspace(0, 1, length, endpoint=False),
114
- np.linspace(0, 1, f0.shape[-1], endpoint=False),
115
- f0,
116
- )
117
-
118
- return f0
119
-
120
-
121
- def mlp(in_size, hidden_size, n_layers):
122
- channels = [in_size] + (n_layers) * [hidden_size]
123
- net = []
124
- for i in range(n_layers):
125
- net.append(nn.Linear(channels[i], channels[i + 1]))
126
- net.append(nn.LayerNorm(channels[i + 1]))
127
- net.append(nn.LeakyReLU())
128
- return nn.Sequential(*net)
129
-
130
-
131
- def gru(n_input, hidden_size):
132
- return nn.GRU(n_input * hidden_size, hidden_size, batch_first=True)
133
-
134
-
135
- def harmonic_synth(pitch, amplitudes, sampling_rate):
136
- n_harmonic = amplitudes.shape[-1]
137
- omega = torch.cumsum(2 * math.pi * pitch / sampling_rate, 1)
138
- omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
139
- signal = (torch.sin(omegas) * amplitudes).sum(-1, keepdim=True)
140
- return signal
141
-
142
-
143
- def amp_to_impulse_response(amp, target_size):
144
- amp = torch.stack([amp, torch.zeros_like(amp)], -1)
145
- amp = torch.view_as_complex(amp)
146
- amp = fft.irfft(amp)
147
-
148
- filter_size = amp.shape[-1]
149
-
150
- amp = torch.roll(amp, filter_size // 2, -1)
151
- win = torch.hann_window(filter_size, dtype=amp.dtype, device=amp.device)
152
-
153
- amp = amp * win
154
-
155
- amp = nn.functional.pad(amp, (0, int(target_size) - int(filter_size)))
156
- amp = torch.roll(amp, -filter_size // 2, -1)
157
-
158
- return amp
159
-
160
-
161
- def fft_convolve(signal, kernel):
162
- signal = nn.functional.pad(signal, (0, signal.shape[-1]))
163
- kernel = nn.functional.pad(kernel, (kernel.shape[-1], 0))
164
-
165
- output = fft.irfft(fft.rfft(signal) * fft.rfft(kernel))
166
- output = output[..., output.shape[-1] // 2:]
167
-
168
- return output
169
-
170
-
171
- def init_kernels(win_len, win_inc, fft_len, win_type=None, invers=False):
172
- if win_type == 'None' or win_type is None:
173
- window = np.ones(win_len)
174
- else:
175
- window = get_window(win_type, win_len, fftbins=True)#**0.5
176
-
177
- N = fft_len
178
- fourier_basis = np.fft.rfft(np.eye(N))[:win_len]
179
- real_kernel = np.real(fourier_basis)
180
- imag_kernel = np.imag(fourier_basis)
181
- kernel = np.concatenate([real_kernel, imag_kernel], 1).T
182
-
183
- if invers :
184
- kernel = np.linalg.pinv(kernel).T
185
-
186
- kernel = kernel*window
187
- kernel = kernel[:, None, :]
188
- return torch.from_numpy(kernel.astype(np.float32)), torch.from_numpy(window[None,:,None].astype(np.float32))
189
-
modules/losses.py DELETED
@@ -1,61 +0,0 @@
- import torch
- from torch.nn import functional as F
-
- import modules.commons as commons
-
-
- def feature_loss(fmap_r, fmap_g):
-     loss = 0
-     for dr, dg in zip(fmap_r, fmap_g):
-         for rl, gl in zip(dr, dg):
-             rl = rl.float().detach()
-             gl = gl.float()
-             loss += torch.mean(torch.abs(rl - gl))
-
-     return loss * 2
-
-
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
-     loss = 0
-     r_losses = []
-     g_losses = []
-     for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
-         dr = dr.float()
-         dg = dg.float()
-         r_loss = torch.mean((1-dr)**2)
-         g_loss = torch.mean(dg**2)
-         loss += (r_loss + g_loss)
-         r_losses.append(r_loss.item())
-         g_losses.append(g_loss.item())
-
-     return loss, r_losses, g_losses
-
-
- def generator_loss(disc_outputs):
-     loss = 0
-     gen_losses = []
-     for dg in disc_outputs:
-         dg = dg.float()
-         l = torch.mean((1-dg)**2)
-         gen_losses.append(l)
-         loss += l
-
-     return loss, gen_losses
-
-
- def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
-     """
-     z_p, logs_q: [b, h, t_t]
-     m_p, logs_p: [b, h, t_t]
-     """
-     z_p = z_p.float()
-     logs_q = logs_q.float()
-     m_p = m_p.float()
-     logs_p = logs_p.float()
-     z_mask = z_mask.float()
-     #print(logs_p)
-     kl = logs_p - logs_q - 0.5
-     kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
-     kl = torch.sum(kl * z_mask)
-     l = kl / torch.sum(z_mask)
-     return l
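
A minimal sketch of exercising the training losses deleted above with random tensors. This example is not part of the repository; the shapes follow the kl_loss docstring and the import path assumes the file was modules/losses.py:

    import torch
    from modules.losses import generator_loss, kl_loss  # assumed import path

    b, h, t = 2, 192, 50
    z_p, m_p = torch.randn(b, h, t), torch.randn(b, h, t)
    logs_q, logs_p = torch.zeros(b, h, t), torch.zeros(b, h, t)
    z_mask = torch.ones(b, 1, t)                         # all frames valid
    print(kl_loss(z_p, logs_q, m_p, logs_p, z_mask).item())

    fake_disc_outputs = [torch.rand(b, 1), torch.rand(b, 1)]
    loss, per_disc = generator_loss(fake_disc_outputs)
    print(loss.item(), [l.item() for l in per_disc])
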
modules/mel_processing.py DELETED
@@ -1,112 +0,0 @@
- import math
- import os
- import random
- import torch
- from torch import nn
- import torch.nn.functional as F
- import torch.utils.data
- import numpy as np
- import librosa
- import librosa.util as librosa_util
- from librosa.util import normalize, pad_center, tiny
- from scipy.signal import get_window
- from scipy.io.wavfile import read
- from librosa.filters import mel as librosa_mel_fn
-
- MAX_WAV_VALUE = 32768.0
-
-
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-     """
-     PARAMS
-     ------
-     C: compression factor
-     """
-     return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
- def dynamic_range_decompression_torch(x, C=1):
-     """
-     PARAMS
-     ------
-     C: compression factor used to compress
-     """
-     return torch.exp(x) / C
-
-
- def spectral_normalize_torch(magnitudes):
-     output = dynamic_range_compression_torch(magnitudes)
-     return output
-
-
- def spectral_de_normalize_torch(magnitudes):
-     output = dynamic_range_decompression_torch(magnitudes)
-     return output
-
-
- mel_basis = {}
- hann_window = {}
-
-
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-     return spec
-
-
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-     global mel_basis
-     dtype_device = str(spec.dtype) + '_' + str(spec.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-     return spec
-
-
- def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global mel_basis, hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-
-     return spec
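
A minimal sketch of computing a mel spectrogram with the function deleted above. This example is not part of the repository; the parameter values are illustrative and the import path assumes the file was modules/mel_processing.py:

    import torch
    from modules.mel_processing import mel_spectrogram_torch  # assumed import path

    wav = torch.randn(1, 44100).clamp(-1., 1.)     # one second of audio in [-1, 1]
    mel = mel_spectrogram_torch(wav, n_fft=2048, num_mels=80, sampling_rate=44100,
                                hop_size=512, win_size=2048, fmin=0, fmax=None)
    print(mel.shape)                               # [1, 80, n_frames]
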
modules/modules.py DELETED
@@ -1,453 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
- from torch.autograd import Function
9
- from typing import Any, Optional, Tuple
10
-
11
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
12
- from torch.nn.utils import weight_norm, remove_weight_norm
13
-
14
- import modules.commons as commons
15
- from modules.commons import init_weights, get_padding
16
- from modules.transforms import piecewise_rational_quadratic_transform
17
-
18
- LRELU_SLOPE = 0.1
19
-
20
-
21
- class LayerNorm(nn.Module):
22
- def __init__(self, channels, eps=1e-5):
23
- super().__init__()
24
- self.channels = channels
25
- self.eps = eps
26
-
27
- self.gamma = nn.Parameter(torch.ones(channels))
28
- self.beta = nn.Parameter(torch.zeros(channels))
29
-
30
- def forward(self, x):
31
- x = x.transpose(1, -1)
32
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
33
- return x.transpose(1, -1)
34
-
35
-
36
- class ConvReluNorm(nn.Module):
37
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
38
- super().__init__()
39
- self.in_channels = in_channels
40
- self.hidden_channels = hidden_channels
41
- self.out_channels = out_channels
42
- self.kernel_size = kernel_size
43
- self.n_layers = n_layers
44
- self.p_dropout = p_dropout
45
- assert n_layers > 1, "Number of layers should be larger than 0."
46
-
47
- self.conv_layers = nn.ModuleList()
48
- self.norm_layers = nn.ModuleList()
49
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
50
- self.norm_layers.append(LayerNorm(hidden_channels))
51
- self.relu_drop = nn.Sequential(
52
- nn.ReLU(),
53
- nn.Dropout(p_dropout))
54
- for _ in range(n_layers - 1):
55
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
56
- self.norm_layers.append(LayerNorm(hidden_channels))
57
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
58
- self.proj.weight.data.zero_()
59
- self.proj.bias.data.zero_()
60
-
61
- def forward(self, x, x_mask):
62
- x_org = x
63
- for i in range(self.n_layers):
64
- x = self.conv_layers[i](x * x_mask)
65
- x = self.norm_layers[i](x)
66
- x = self.relu_drop(x)
67
- x = x_org + self.proj(x)
68
- return x * x_mask
69
-
70
-
71
- class DDSConv(nn.Module):
72
- """
73
- Dialted and Depth-Separable Convolution
74
- """
75
-
76
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
77
- super().__init__()
78
- self.channels = channels
79
- self.kernel_size = kernel_size
80
- self.n_layers = n_layers
81
- self.p_dropout = p_dropout
82
-
83
- self.drop = nn.Dropout(p_dropout)
84
- self.convs_sep = nn.ModuleList()
85
- self.convs_1x1 = nn.ModuleList()
86
- self.norms_1 = nn.ModuleList()
87
- self.norms_2 = nn.ModuleList()
88
- for i in range(n_layers):
89
- dilation = kernel_size ** i
90
- padding = (kernel_size * dilation - dilation) // 2
91
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
92
- groups=channels, dilation=dilation, padding=padding
93
- ))
94
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
95
- self.norms_1.append(LayerNorm(channels))
96
- self.norms_2.append(LayerNorm(channels))
97
-
98
- def forward(self, x, x_mask, g=None):
99
- if g is not None:
100
- x = x + g
101
- for i in range(self.n_layers):
102
- y = self.convs_sep[i](x * x_mask)
103
- y = self.norms_1[i](y)
104
- y = F.gelu(y)
105
- y = self.convs_1x1[i](y)
106
- y = self.norms_2[i](y)
107
- y = F.gelu(y)
108
- y = self.drop(y)
109
- x = x + y
110
- return x * x_mask
111
-
112
-
113
- class WN(torch.nn.Module):
114
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, n_speakers=0, spk_channels=0,
115
- p_dropout=0):
116
- super(WN, self).__init__()
117
- assert (kernel_size % 2 == 1)
118
- self.hidden_channels = hidden_channels
119
- self.kernel_size = kernel_size,
120
- self.dilation_rate = dilation_rate
121
- self.n_layers = n_layers
122
- self.n_speakers = n_speakers
123
- self.spk_channels = spk_channels
124
- self.p_dropout = p_dropout
125
-
126
- self.in_layers = torch.nn.ModuleList()
127
- self.res_skip_layers = torch.nn.ModuleList()
128
- self.drop = nn.Dropout(p_dropout)
129
-
130
- if n_speakers > 0:
131
- cond_layer = torch.nn.Conv1d(spk_channels, 2 * hidden_channels * n_layers, 1)
132
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
133
-
134
- for i in range(n_layers):
135
- dilation = dilation_rate ** i
136
- padding = int((kernel_size * dilation - dilation) / 2)
137
- in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
138
- dilation=dilation, padding=padding)
139
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
140
- self.in_layers.append(in_layer)
141
-
142
- # last one is not necessary
143
- if i < n_layers - 1:
144
- res_skip_channels = 2 * hidden_channels
145
- else:
146
- res_skip_channels = hidden_channels
147
-
148
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
149
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
150
- self.res_skip_layers.append(res_skip_layer)
151
-
152
- def forward(self, x, x_mask, g=None, **kwargs):
153
- output = torch.zeros_like(x)
154
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
155
-
156
- if g is not None:
157
- g = self.cond_layer(g)
158
-
159
- for i in range(self.n_layers):
160
- x_in = self.in_layers[i](x)
161
- if g is not None:
162
- cond_offset = i * 2 * self.hidden_channels
163
- g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
164
- else:
165
- g_l = torch.zeros_like(x_in)
166
-
167
- acts = commons.fused_add_tanh_sigmoid_multiply(
168
- x_in,
169
- g_l,
170
- n_channels_tensor)
171
- acts = self.drop(acts)
172
-
173
- res_skip_acts = self.res_skip_layers[i](acts)
174
- if i < self.n_layers - 1:
175
- res_acts = res_skip_acts[:, :self.hidden_channels, :]
176
- x = (x + res_acts) * x_mask
177
- output = output + res_skip_acts[:, self.hidden_channels:, :]
178
- else:
179
- output = output + res_skip_acts
180
- return output * x_mask
181
-
182
- def remove_weight_norm(self):
183
- if self.n_speakers > 0:
184
- torch.nn.utils.remove_weight_norm(self.cond_layer)
185
- for l in self.in_layers:
186
- torch.nn.utils.remove_weight_norm(l)
187
- for l in self.res_skip_layers:
188
- torch.nn.utils.remove_weight_norm(l)
189
-
190
-
191
- class ResBlock1(torch.nn.Module):
192
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
193
- super(ResBlock1, self).__init__()
194
- self.convs1 = nn.ModuleList([
195
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
196
- padding=get_padding(kernel_size, dilation[0]))),
197
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
198
- padding=get_padding(kernel_size, dilation[1]))),
199
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
200
- padding=get_padding(kernel_size, dilation[2])))
201
- ])
202
- self.convs1.apply(init_weights)
203
-
204
- self.convs2 = nn.ModuleList([
205
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
206
- padding=get_padding(kernel_size, 1))),
207
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
208
- padding=get_padding(kernel_size, 1))),
209
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
210
- padding=get_padding(kernel_size, 1)))
211
- ])
212
- self.convs2.apply(init_weights)
213
-
214
- def forward(self, x, x_mask=None):
215
- for c1, c2 in zip(self.convs1, self.convs2):
216
- xt = F.leaky_relu(x, LRELU_SLOPE)
217
- if x_mask is not None:
218
- xt = xt * x_mask
219
- xt = c1(xt)
220
- xt = F.leaky_relu(xt, LRELU_SLOPE)
221
- if x_mask is not None:
222
- xt = xt * x_mask
223
- xt = c2(xt)
224
- x = xt + x
225
- if x_mask is not None:
226
- x = x * x_mask
227
- return x
228
-
229
- def remove_weight_norm(self):
230
- for l in self.convs1:
231
- remove_weight_norm(l)
232
- for l in self.convs2:
233
- remove_weight_norm(l)
234
-
235
-
236
- class ResBlock2(torch.nn.Module):
237
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
238
- super(ResBlock2, self).__init__()
239
- self.convs = nn.ModuleList([
240
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
241
- padding=get_padding(kernel_size, dilation[0]))),
242
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
243
- padding=get_padding(kernel_size, dilation[1])))
244
- ])
245
- self.convs.apply(init_weights)
246
-
247
- def forward(self, x, x_mask=None):
248
- for c in self.convs:
249
- xt = F.leaky_relu(x, LRELU_SLOPE)
250
- if x_mask is not None:
251
- xt = xt * x_mask
252
- xt = c(xt)
253
- x = xt + x
254
- if x_mask is not None:
255
- x = x * x_mask
256
- return x
257
-
258
- def remove_weight_norm(self):
259
- for l in self.convs:
260
- remove_weight_norm(l)
261
-
262
-
263
- class Log(nn.Module):
264
- def forward(self, x, x_mask, reverse=False, **kwargs):
265
- if not reverse:
266
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
267
- logdet = torch.sum(-y, [1, 2])
268
- return y, logdet
269
- else:
270
- x = torch.exp(x) * x_mask
271
- return x
272
-
273
-
274
- class Flip(nn.Module):
275
- def forward(self, x, *args, reverse=False, **kwargs):
276
- x = torch.flip(x, [1])
277
- if not reverse:
278
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
279
- return x, logdet
280
- else:
281
- return x
282
-
283
-
284
- class ElementwiseAffine(nn.Module):
285
- def __init__(self, channels):
286
- super().__init__()
287
- self.channels = channels
288
- self.m = nn.Parameter(torch.zeros(channels, 1))
289
- self.logs = nn.Parameter(torch.zeros(channels, 1))
290
-
291
- def forward(self, x, x_mask, reverse=False, **kwargs):
292
- if not reverse:
293
- y = self.m + torch.exp(self.logs) * x
294
- y = y * x_mask
295
- logdet = torch.sum(self.logs * x_mask, [1, 2])
296
- return y, logdet
297
- else:
298
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
299
- return x
300
-
301
-
302
- class ResidualCouplingLayer(nn.Module):
303
- def __init__(self,
304
- channels,
305
- hidden_channels,
306
- kernel_size,
307
- dilation_rate,
308
- n_layers,
309
- p_dropout=0,
310
- n_speakers=0,
311
- spk_channels=0,
312
- mean_only=False):
313
- assert channels % 2 == 0, "channels should be divisible by 2"
314
- super().__init__()
315
- self.channels = channels
316
- self.hidden_channels = hidden_channels
317
- self.kernel_size = kernel_size
318
- self.dilation_rate = dilation_rate
319
- self.n_layers = n_layers
320
- self.half_channels = channels // 2
321
- self.mean_only = mean_only
322
-
323
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
324
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, n_speakers=n_speakers,
325
- spk_channels=spk_channels)
326
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
327
- self.post.weight.data.zero_()
328
- self.post.bias.data.zero_()
329
-
330
- def forward(self, x, x_mask, g=None, reverse=False):
331
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
332
- h = self.pre(x0) * x_mask
333
- h = self.enc(h, x_mask, g=g)
334
- stats = self.post(h) * x_mask
335
- if not self.mean_only:
336
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
337
- else:
338
- m = stats
339
- logs = torch.zeros_like(m)
340
-
341
- if not reverse:
342
- x1 = m + x1 * torch.exp(logs) * x_mask
343
- x = torch.cat([x0, x1], 1)
344
- logdet = torch.sum(logs, [1, 2])
345
- return x, logdet
346
- else:
347
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
348
- x = torch.cat([x0, x1], 1)
349
- return x
350
-
351
-
352
- class ResidualCouplingBlock(nn.Module):
353
- def __init__(self,
354
- channels,
355
- hidden_channels,
356
- kernel_size,
357
- dilation_rate,
358
- n_layers,
359
- n_flows=4,
360
- n_speakers=0,
361
- gin_channels=0):
362
- super().__init__()
363
- self.channels = channels
364
- self.hidden_channels = hidden_channels
365
- self.kernel_size = kernel_size
366
- self.dilation_rate = dilation_rate
367
- self.n_layers = n_layers
368
- self.n_flows = n_flows
369
- self.gin_channels = gin_channels
370
-
371
- self.flows = nn.ModuleList()
372
- for i in range(n_flows):
373
- self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
374
- n_speakers=n_speakers, spk_channels=gin_channels, mean_only=True))
375
- self.flows.append(Flip())
376
-
377
- def forward(self, x, x_mask, g=None, reverse=False):
378
- if not reverse:
379
- for flow in self.flows:
380
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
381
- else:
382
- for flow in reversed(self.flows):
383
- x = flow(x, x_mask, g=g, reverse=reverse)
384
- return x
385
-
386
-
387
- class ConvFlow(nn.Module):
388
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
389
- super().__init__()
390
- self.in_channels = in_channels
391
- self.filter_channels = filter_channels
392
- self.kernel_size = kernel_size
393
- self.n_layers = n_layers
394
- self.num_bins = num_bins
395
- self.tail_bound = tail_bound
396
- self.half_channels = in_channels // 2
397
-
398
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
399
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
400
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
401
- self.proj.weight.data.zero_()
402
- self.proj.bias.data.zero_()
403
-
404
- def forward(self, x, x_mask, g=None, reverse=False):
405
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
406
- h = self.pre(x0)
407
- h = self.convs(h, x_mask, g=g)
408
- h = self.proj(h) * x_mask
409
-
410
- b, c, t = x0.shape
411
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
412
-
413
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
414
- unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
415
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
416
-
417
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
418
- unnormalized_widths,
419
- unnormalized_heights,
420
- unnormalized_derivatives,
421
- inverse=reverse,
422
- tails='linear',
423
- tail_bound=self.tail_bound
424
- )
425
-
426
- x = torch.cat([x0, x1], 1) * x_mask
427
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
428
- if not reverse:
429
- return x, logdet
430
- else:
431
- return x
432
-
433
-
434
- class ResStack(nn.Module):
435
- def __init__(self, channel, kernel_size=3, base=3, nums=4):
436
- super(ResStack, self).__init__()
437
-
438
- self.layers = nn.ModuleList([
439
- nn.Sequential(
440
- nn.LeakyReLU(),
441
- nn.utils.weight_norm(nn.Conv1d(channel, channel,
442
- kernel_size=kernel_size, dilation=base ** i, padding=base ** i)),
443
- nn.LeakyReLU(),
444
- nn.utils.weight_norm(nn.Conv1d(channel, channel,
445
- kernel_size=kernel_size, dilation=1, padding=1)),
446
- )
447
- for i in range(nums)
448
- ])
449
-
450
- def forward(self, x):
451
- for layer in self.layers:
452
- x = x + layer(x)
453
- return x
modules/stft.py DELETED
@@ -1,512 +0,0 @@
1
- from librosa.util import pad_center, tiny
2
- from scipy.signal import get_window
3
- from torch import Tensor
4
- from torch.autograd import Variable
5
- from typing import Optional, Tuple
6
-
7
- import librosa
8
- import librosa.util as librosa_util
9
- import math
10
- import numpy as np
11
- import scipy
12
- import torch
13
- import torch.nn.functional as F
14
- import warnings
15
-
16
-
17
- def create_fb_matrix(
18
- n_freqs: int,
19
- f_min: float,
20
- f_max: float,
21
- n_mels: int,
22
- sample_rate: int,
23
- norm: Optional[str] = None
24
- ) -> Tensor:
25
- r"""Create a frequency bin conversion matrix.
26
-
27
- Args:
28
- n_freqs (int): Number of frequencies to highlight/apply
29
- f_min (float): Minimum frequency (Hz)
30
- f_max (float): Maximum frequency (Hz)
31
- n_mels (int): Number of mel filterbanks
32
- sample_rate (int): Sample rate of the audio waveform
33
- norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
34
- (area normalization). (Default: ``None``)
35
-
36
- Returns:
37
- Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
38
- meaning number of frequencies to highlight/apply to x the number of filterbanks.
39
- Each column is a filterbank so that assuming there is a matrix A of
40
- size (..., ``n_freqs``), the applied result would be
41
- ``A * create_fb_matrix(A.size(-1), ...)``.
42
- """
43
-
44
- if norm is not None and norm != "slaney":
45
- raise ValueError("norm must be one of None or 'slaney'")
46
-
47
- # freq bins
48
- # Equivalent filterbank construction by Librosa
49
- all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
50
-
51
- # calculate mel freq bins
52
- # hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
53
- m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
54
- m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
55
- m_pts = torch.linspace(m_min, m_max, n_mels + 2)
56
- # mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
57
- f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
58
- # calculate the difference between each mel point and each stft freq point in hertz
59
- f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
60
- slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
61
- # create overlapping triangles
62
- down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
63
- up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
64
- fb = torch.min(down_slopes, up_slopes)
65
- fb = torch.clamp(fb, 1e-6, 1)
66
-
67
- if norm is not None and norm == "slaney":
68
- # Slaney-style mel is scaled to be approx constant energy per channel
69
- enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
70
- fb *= enorm.unsqueeze(0)
71
- return fb
72
-
73
-
74
- def lfilter(
75
- waveform: Tensor,
76
- a_coeffs: Tensor,
77
- b_coeffs: Tensor,
78
- clamp: bool = True,
79
- ) -> Tensor:
80
- r"""Perform an IIR filter by evaluating difference equation.
81
-
82
- Args:
83
- waveform (Tensor): audio waveform of dimension of ``(..., time)``. Must be normalized to -1 to 1.
84
- a_coeffs (Tensor): denominator coefficients of difference equation of dimension of ``(n_order + 1)``.
85
- Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``.
86
- Must be same size as b_coeffs (pad with 0's as necessary).
87
- b_coeffs (Tensor): numerator coefficients of difference equation of dimension of ``(n_order + 1)``.
88
- Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``.
89
- Must be same size as a_coeffs (pad with 0's as necessary).
90
- clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
91
-
92
- Returns:
93
- Tensor: Waveform with dimension of ``(..., time)``.
94
- """
95
- # pack batch
96
- shape = waveform.size()
97
- waveform = waveform.reshape(-1, shape[-1])
98
-
99
- assert (a_coeffs.size(0) == b_coeffs.size(0))
100
- assert (len(waveform.size()) == 2)
101
- assert (waveform.device == a_coeffs.device)
102
- assert (b_coeffs.device == a_coeffs.device)
103
-
104
- device = waveform.device
105
- dtype = waveform.dtype
106
- n_channel, n_sample = waveform.size()
107
- n_order = a_coeffs.size(0)
108
- n_sample_padded = n_sample + n_order - 1
109
- assert (n_order > 0)
110
-
111
- # Pad the input and create output
112
- padded_waveform = torch.zeros(n_channel, n_sample_padded, dtype=dtype, device=device)
113
- padded_waveform[:, (n_order - 1):] = waveform
114
- padded_output_waveform = torch.zeros(n_channel, n_sample_padded, dtype=dtype, device=device)
115
-
116
- # Set up the coefficients matrix
117
- # Flip coefficients' order
118
- a_coeffs_flipped = a_coeffs.flip(0)
119
- b_coeffs_flipped = b_coeffs.flip(0)
120
-
121
- # calculate windowed_input_signal in parallel
122
- # create indices of original with shape (n_channel, n_order, n_sample)
123
- window_idxs = torch.arange(n_sample, device=device).unsqueeze(0) + torch.arange(n_order, device=device).unsqueeze(1)
124
- window_idxs = window_idxs.repeat(n_channel, 1, 1)
125
- window_idxs += (torch.arange(n_channel, device=device).unsqueeze(-1).unsqueeze(-1) * n_sample_padded)
126
- window_idxs = window_idxs.long()
127
- # (n_order, ) matmul (n_channel, n_order, n_sample) -> (n_channel, n_sample)
128
- input_signal_windows = torch.matmul(b_coeffs_flipped, torch.take(padded_waveform, window_idxs))
129
-
130
- input_signal_windows.div_(a_coeffs[0])
131
- a_coeffs_flipped.div_(a_coeffs[0])
132
- for i_sample, o0 in enumerate(input_signal_windows.t()):
133
- windowed_output_signal = padded_output_waveform[:, i_sample:(i_sample + n_order)]
134
- o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
135
- padded_output_waveform[:, i_sample + n_order - 1] = o0
136
-
137
- output = padded_output_waveform[:, (n_order - 1):]
138
-
139
- if clamp:
140
- output = torch.clamp(output, min=-1., max=1.)
141
-
142
- # unpack batch
143
- output = output.reshape(shape[:-1] + output.shape[-1:])
144
-
145
- return output
146
-
147
-
148
-
149
- def biquad(
150
- waveform: Tensor,
151
- b0: float,
152
- b1: float,
153
- b2: float,
154
- a0: float,
155
- a1: float,
156
- a2: float
157
- ) -> Tensor:
158
- r"""Perform a biquad filter of input tensor. Initial conditions set to 0.
159
- https://en.wikipedia.org/wiki/Digital_biquad_filter
160
-
161
- Args:
162
- waveform (Tensor): audio waveform of dimension of `(..., time)`
163
- b0 (float): numerator coefficient of current input, x[n]
164
- b1 (float): numerator coefficient of input one time step ago x[n-1]
165
- b2 (float): numerator coefficient of input two time steps ago x[n-2]
166
- a0 (float): denominator coefficient of current output y[n], typically 1
167
- a1 (float): denominator coefficient of current output y[n-1]
168
- a2 (float): denominator coefficient of current output y[n-2]
169
-
170
- Returns:
171
- Tensor: Waveform with dimension of `(..., time)`
172
- """
173
-
174
- device = waveform.device
175
- dtype = waveform.dtype
176
-
177
- output_waveform = lfilter(
178
- waveform,
179
- torch.tensor([a0, a1, a2], dtype=dtype, device=device),
180
- torch.tensor([b0, b1, b2], dtype=dtype, device=device)
181
- )
182
- return output_waveform
183
-
184
-
185
-
186
- def _dB2Linear(x: float) -> float:
187
- return math.exp(x * math.log(10) / 20.0)
188
-
189
-
190
- def highpass_biquad(
191
- waveform: Tensor,
192
- sample_rate: int,
193
- cutoff_freq: float,
194
- Q: float = 0.707
195
- ) -> Tensor:
196
- r"""Design biquad highpass filter and perform filtering. Similar to SoX implementation.
197
-
198
- Args:
199
- waveform (Tensor): audio waveform of dimension of `(..., time)`
200
- sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
201
- cutoff_freq (float): filter cutoff frequency
202
- Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
203
-
204
- Returns:
205
- Tensor: Waveform dimension of `(..., time)`
206
- """
207
- w0 = 2 * math.pi * cutoff_freq / sample_rate
208
- alpha = math.sin(w0) / 2. / Q
209
-
210
- b0 = (1 + math.cos(w0)) / 2
211
- b1 = -1 - math.cos(w0)
212
- b2 = b0
213
- a0 = 1 + alpha
214
- a1 = -2 * math.cos(w0)
215
- a2 = 1 - alpha
216
- return biquad(waveform, b0, b1, b2, a0, a1, a2)
217
-
218
-
219
-
220
- def lowpass_biquad(
221
- waveform: Tensor,
222
- sample_rate: int,
223
- cutoff_freq: float,
224
- Q: float = 0.707
225
- ) -> Tensor:
226
- r"""Design biquad lowpass filter and perform filtering. Similar to SoX implementation.
227
-
228
- Args:
229
- waveform (torch.Tensor): audio waveform of dimension of `(..., time)`
230
- sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
231
- cutoff_freq (float): filter cutoff frequency
232
- Q (float, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
233
-
234
- Returns:
235
- Tensor: Waveform of dimension of `(..., time)`
236
- """
237
- w0 = 2 * math.pi * cutoff_freq / sample_rate
238
- alpha = math.sin(w0) / 2 / Q
239
-
240
- b0 = (1 - math.cos(w0)) / 2
241
- b1 = 1 - math.cos(w0)
242
- b2 = b0
243
- a0 = 1 + alpha
244
- a1 = -2 * math.cos(w0)
245
- a2 = 1 - alpha
246
- return biquad(waveform, b0, b1, b2, a0, a1, a2)
247
-
248
-
249
- def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
250
- n_fft=800, dtype=np.float32, norm=None):
251
- """
252
- # from librosa 0.6
253
- Compute the sum-square envelope of a window function at a given hop length.
254
-
255
- This is used to estimate modulation effects induced by windowing
256
- observations in short-time fourier transforms.
257
-
258
- Parameters
259
- ----------
260
- window : string, tuple, number, callable, or list-like
261
- Window specification, as in `get_window`
262
-
263
- n_frames : int > 0
264
- The number of analysis frames
265
-
266
- hop_length : int > 0
267
- The number of samples to advance between frames
268
-
269
- win_length : [optional]
270
- The length of the window function. By default, this matches `n_fft`.
271
-
272
- n_fft : int > 0
273
- The length of each analysis frame.
274
-
275
- dtype : np.dtype
276
- The data type of the output
277
-
278
- Returns
279
- -------
280
- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
281
- The sum-squared envelope of the window function
282
- """
283
- if win_length is None:
284
- win_length = n_fft
285
-
286
- n = n_fft + hop_length * (n_frames - 1)
287
- x = np.zeros(n, dtype=dtype)
288
-
289
- # Compute the squared window at the desired length
290
- win_sq = get_window(window, win_length, fftbins=True)
291
- win_sq = librosa_util.normalize(win_sq, norm=norm)**2
292
- win_sq = librosa_util.pad_center(win_sq, n_fft)
293
-
294
- # Fill the envelope
295
- for i in range(n_frames):
296
- sample = i * hop_length
297
- x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
298
- return x
299
-
300
-
301
- class MelScale(torch.nn.Module):
302
- r"""Turn a normal STFT into a mel frequency STFT, using a conversion
303
- matrix. This uses triangular filter banks.
304
-
305
- User can control which device the filter bank (`fb`) is (e.g. fb.to(spec_f.device)).
306
-
307
- Args:
308
- n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
309
- sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
310
- f_min (float, optional): Minimum frequency. (Default: ``0.``)
311
- f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
312
- n_stft (int, optional): Number of bins in STFT. Calculated from first input
313
- if None is given. See ``n_fft`` in :class:`Spectrogram`. (Default: ``None``)
314
- """
315
- __constants__ = ['n_mels', 'sample_rate', 'f_min', 'f_max']
316
-
317
- def __init__(self,
318
- n_mels: int = 128,
319
- sample_rate: int = 24000,
320
- f_min: float = 0.,
321
- f_max: Optional[float] = None,
322
- n_stft: Optional[int] = None) -> None:
323
- super(MelScale, self).__init__()
324
- self.n_mels = n_mels
325
- self.sample_rate = sample_rate
326
- self.f_max = f_max if f_max is not None else float(sample_rate // 2)
327
- self.f_min = f_min
328
-
329
- assert f_min <= self.f_max, 'Require f_min: %f < f_max: %f' % (f_min, self.f_max)
330
-
331
- fb = torch.empty(0) if n_stft is None else create_fb_matrix(
332
- n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate)
333
- self.register_buffer('fb', fb)
334
-
335
- def forward(self, specgram: Tensor) -> Tensor:
336
- r"""
337
- Args:
338
- specgram (Tensor): A spectrogram STFT of dimension (..., freq, time).
339
-
340
- Returns:
341
- Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
342
- """
343
-
344
- # pack batch
345
- shape = specgram.size()
346
- specgram = specgram.reshape(-1, shape[-2], shape[-1])
347
-
348
- if self.fb.numel() == 0:
349
- tmp_fb = create_fb_matrix(specgram.size(1), self.f_min, self.f_max, self.n_mels, self.sample_rate)
350
- # Attributes cannot be reassigned outside __init__ so workaround
351
- self.fb.resize_(tmp_fb.size())
352
- self.fb.copy_(tmp_fb)
353
-
354
- # (channel, frequency, time).transpose(...) dot (frequency, n_mels)
355
- # -> (channel, time, n_mels).transpose(...)
356
- mel_specgram = torch.matmul(specgram.transpose(1, 2), self.fb).transpose(1, 2)
357
-
358
- # unpack batch
359
- mel_specgram = mel_specgram.reshape(shape[:-2] + mel_specgram.shape[-2:])
360
-
361
- return mel_specgram
362
-
363
-
364
- class TorchSTFT(torch.nn.Module):
365
- def __init__(self, fft_size, hop_size, win_size,
366
- normalized=False, domain='linear',
367
- mel_scale=False, ref_level_db=20, min_level_db=-100):
368
- super().__init__()
369
- self.fft_size = fft_size
370
- self.hop_size = hop_size
371
- self.win_size = win_size
372
- self.ref_level_db = ref_level_db
373
- self.min_level_db = min_level_db
374
- self.window = torch.hann_window(win_size)
375
- self.normalized = normalized
376
- self.domain = domain
377
- self.mel_scale = MelScale(n_mels=(fft_size // 2 + 1),
378
- n_stft=(fft_size // 2 + 1)) if mel_scale else None
379
-
380
- def transform(self, x):
381
- x_stft = torch.stft(x.to(torch.float32), self.fft_size, self.hop_size, self.win_size,
382
- self.window.type_as(x), normalized=self.normalized)
383
- real = x_stft[..., 0]
384
- imag = x_stft[..., 1]
385
- mag = torch.clamp(real ** 2 + imag ** 2, min=1e-7)
386
- mag = torch.sqrt(mag)
387
- phase = torch.atan2(imag, real)
388
-
389
- if self.mel_scale is not None:
390
- mag = self.mel_scale(mag)
391
-
392
- if self.domain == 'log':
393
- mag = 20 * torch.log10(mag) - self.ref_level_db
394
- mag = torch.clamp((mag - self.min_level_db) / -self.min_level_db, 0, 1)
395
- return mag, phase
396
- elif self.domain == 'linear':
397
- return mag, phase
398
- elif self.domain == 'double':
399
- log_mag = 20 * torch.log10(mag) - self.ref_level_db
400
- log_mag = torch.clamp((log_mag - self.min_level_db) / -self.min_level_db, 0, 1)
401
- return torch.cat((mag, log_mag), dim=1), phase
402
-
403
- def complex(self, x):
404
- x_stft = torch.stft(x, self.fft_size, self.hop_size, self.win_size,
405
- self.window.type_as(x), normalized=self.normalized)
406
- real = x_stft[..., 0]
407
- imag = x_stft[..., 1]
408
- return real, imag
409
-
410
-
411
-
412
- class STFT(torch.nn.Module):
413
- """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
414
- def __init__(self, filter_length=800, hop_length=200, win_length=800,
415
- window='hann'):
416
- super(STFT, self).__init__()
417
- self.filter_length = filter_length
418
- self.hop_length = hop_length
419
- self.win_length = win_length
420
- self.window = window
421
- self.forward_transform = None
422
- scale = self.filter_length / self.hop_length
423
- fourier_basis = np.fft.fft(np.eye(self.filter_length))
424
-
425
- cutoff = int((self.filter_length / 2 + 1))
426
- fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
427
- np.imag(fourier_basis[:cutoff, :])])
428
-
429
- forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
430
- inverse_basis = torch.FloatTensor(
431
- np.linalg.pinv(scale * fourier_basis).T[:, None, :])
432
-
433
- if window is not None:
434
- assert(filter_length >= win_length)
435
- # get window and zero center pad it to filter_length
436
- fft_window = get_window(window, win_length, fftbins=True)
437
- fft_window = pad_center(fft_window, filter_length)
438
- fft_window = torch.from_numpy(fft_window).float()
439
-
440
- # window the bases
441
- forward_basis *= fft_window
442
- inverse_basis *= fft_window
443
-
444
- self.register_buffer('forward_basis', forward_basis.float())
445
- self.register_buffer('inverse_basis', inverse_basis.float())
446
-
447
- def transform(self, input_data):
448
- num_batches = input_data.size(0)
449
- num_samples = input_data.size(1)
450
-
451
- self.num_samples = num_samples
452
-
453
- # similar to librosa, reflect-pad the input
454
- input_data = input_data.view(num_batches, 1, num_samples)
455
- input_data = F.pad(
456
- input_data.unsqueeze(1),
457
- (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
458
- mode='reflect')
459
- input_data = input_data.squeeze(1)
460
-
461
- forward_transform = F.conv1d(
462
- input_data,
463
- Variable(self.forward_basis, requires_grad=False),
464
- stride=self.hop_length,
465
- padding=0)
466
-
467
- cutoff = int((self.filter_length / 2) + 1)
468
- real_part = forward_transform[:, :cutoff, :]
469
- imag_part = forward_transform[:, cutoff:, :]
470
-
471
- magnitude = torch.sqrt(real_part**2 + imag_part**2)
472
- phase = torch.autograd.Variable(
473
- torch.atan2(imag_part.data, real_part.data))
474
-
475
- return magnitude, phase
476
-
477
- def inverse(self, magnitude, phase):
478
- recombine_magnitude_phase = torch.cat(
479
- [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
480
-
481
- inverse_transform = F.conv_transpose1d(
482
- recombine_magnitude_phase,
483
- Variable(self.inverse_basis, requires_grad=False),
484
- stride=self.hop_length,
485
- padding=0)
486
-
487
- if self.window is not None:
488
- window_sum = window_sumsquare(
489
- self.window, magnitude.size(-1), hop_length=self.hop_length,
490
- win_length=self.win_length, n_fft=self.filter_length,
491
- dtype=np.float32)
492
- # remove modulation effects
493
- approx_nonzero_indices = torch.from_numpy(
494
- np.where(window_sum > tiny(window_sum))[0])
495
- window_sum = torch.autograd.Variable(
496
- torch.from_numpy(window_sum), requires_grad=False)
497
- window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
498
- inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
499
-
500
- # scale by hop ratio
501
- inverse_transform *= float(self.filter_length) / self.hop_length
502
-
503
- inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
504
- inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]
505
-
506
- return inverse_transform
507
-
508
- def forward(self, input_data):
509
- self.magnitude, self.phase = self.transform(input_data)
510
- reconstruction = self.inverse(self.magnitude, self.phase)
511
- return reconstruction
512
-
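
For reference, a minimal round-trip sketch of the STFT class defined above; the 440 Hz test tone and its length are illustrative assumptions, and the class (plus its window_sumsquare helper) is assumed to be importable from this module.

import torch

# Build the convolution-based STFT with the same defaults as the class definition above.
stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')

# One second of a 440 Hz tone, shaped (batch, samples) as transform() expects.
t = torch.arange(16000, dtype=torch.float32) / 16000.0
audio = torch.sin(2.0 * torch.pi * 440.0 * t).unsqueeze(0)

magnitude, phase = stft.transform(audio)          # each (1, filter_length // 2 + 1, frames)
reconstruction = stft.inverse(magnitude, phase)   # approximately recovers the (padded) input
print(magnitude.shape, reconstruction.shape)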
 
modules/transforms.py DELETED
@@ -1,193 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import numpy as np
5
-
6
-
7
- DEFAULT_MIN_BIN_WIDTH = 1e-3
8
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
- DEFAULT_MIN_DERIVATIVE = 1e-3
10
-
11
-
12
- def piecewise_rational_quadratic_transform(inputs,
13
- unnormalized_widths,
14
- unnormalized_heights,
15
- unnormalized_derivatives,
16
- inverse=False,
17
- tails=None,
18
- tail_bound=1.,
19
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
- min_derivative=DEFAULT_MIN_DERIVATIVE):
22
-
23
- if tails is None:
24
- spline_fn = rational_quadratic_spline
25
- spline_kwargs = {}
26
- else:
27
- spline_fn = unconstrained_rational_quadratic_spline
28
- spline_kwargs = {
29
- 'tails': tails,
30
- 'tail_bound': tail_bound
31
- }
32
-
33
- outputs, logabsdet = spline_fn(
34
- inputs=inputs,
35
- unnormalized_widths=unnormalized_widths,
36
- unnormalized_heights=unnormalized_heights,
37
- unnormalized_derivatives=unnormalized_derivatives,
38
- inverse=inverse,
39
- min_bin_width=min_bin_width,
40
- min_bin_height=min_bin_height,
41
- min_derivative=min_derivative,
42
- **spline_kwargs
43
- )
44
- return outputs, logabsdet
45
-
46
-
47
- def searchsorted(bin_locations, inputs, eps=1e-6):
48
- bin_locations[..., -1] += eps
49
- return torch.sum(
50
- inputs[..., None] >= bin_locations,
51
- dim=-1
52
- ) - 1
53
-
54
-
55
- def unconstrained_rational_quadratic_spline(inputs,
56
- unnormalized_widths,
57
- unnormalized_heights,
58
- unnormalized_derivatives,
59
- inverse=False,
60
- tails='linear',
61
- tail_bound=1.,
62
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
- min_derivative=DEFAULT_MIN_DERIVATIVE):
65
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
- outside_interval_mask = ~inside_interval_mask
67
-
68
- outputs = torch.zeros_like(inputs)
69
- logabsdet = torch.zeros_like(inputs)
70
-
71
- if tails == 'linear':
72
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
- constant = np.log(np.exp(1 - min_derivative) - 1)
74
- unnormalized_derivatives[..., 0] = constant
75
- unnormalized_derivatives[..., -1] = constant
76
-
77
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
78
- logabsdet[outside_interval_mask] = 0
79
- else:
80
- raise RuntimeError('{} tails are not implemented.'.format(tails))
81
-
82
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
83
- inputs=inputs[inside_interval_mask],
84
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
- inverse=inverse,
88
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
89
- min_bin_width=min_bin_width,
90
- min_bin_height=min_bin_height,
91
- min_derivative=min_derivative
92
- )
93
-
94
- return outputs, logabsdet
95
-
96
- def rational_quadratic_spline(inputs,
97
- unnormalized_widths,
98
- unnormalized_heights,
99
- unnormalized_derivatives,
100
- inverse=False,
101
- left=0., right=1., bottom=0., top=1.,
102
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
103
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
104
- min_derivative=DEFAULT_MIN_DERIVATIVE):
105
- if torch.min(inputs) < left or torch.max(inputs) > right:
106
- raise ValueError('Input to a transform is not within its domain')
107
-
108
- num_bins = unnormalized_widths.shape[-1]
109
-
110
- if min_bin_width * num_bins > 1.0:
111
- raise ValueError('Minimal bin width too large for the number of bins')
112
- if min_bin_height * num_bins > 1.0:
113
- raise ValueError('Minimal bin height too large for the number of bins')
114
-
115
- widths = F.softmax(unnormalized_widths, dim=-1)
116
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
117
- cumwidths = torch.cumsum(widths, dim=-1)
118
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
119
- cumwidths = (right - left) * cumwidths + left
120
- cumwidths[..., 0] = left
121
- cumwidths[..., -1] = right
122
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
123
-
124
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
125
-
126
- heights = F.softmax(unnormalized_heights, dim=-1)
127
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
128
- cumheights = torch.cumsum(heights, dim=-1)
129
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
130
- cumheights = (top - bottom) * cumheights + bottom
131
- cumheights[..., 0] = bottom
132
- cumheights[..., -1] = top
133
- heights = cumheights[..., 1:] - cumheights[..., :-1]
134
-
135
- if inverse:
136
- bin_idx = searchsorted(cumheights, inputs)[..., None]
137
- else:
138
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
139
-
140
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
141
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
142
-
143
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
144
- delta = heights / widths
145
- input_delta = delta.gather(-1, bin_idx)[..., 0]
146
-
147
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
148
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
149
-
150
- input_heights = heights.gather(-1, bin_idx)[..., 0]
151
-
152
- if inverse:
153
- a = (((inputs - input_cumheights) * (input_derivatives
154
- + input_derivatives_plus_one
155
- - 2 * input_delta)
156
- + input_heights * (input_delta - input_derivatives)))
157
- b = (input_heights * input_derivatives
158
- - (inputs - input_cumheights) * (input_derivatives
159
- + input_derivatives_plus_one
160
- - 2 * input_delta))
161
- c = - input_delta * (inputs - input_cumheights)
162
-
163
- discriminant = b.pow(2) - 4 * a * c
164
- assert (discriminant >= 0).all()
165
-
166
- root = (2 * c) / (-b - torch.sqrt(discriminant))
167
- outputs = root * input_bin_widths + input_cumwidths
168
-
169
- theta_one_minus_theta = root * (1 - root)
170
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
171
- * theta_one_minus_theta)
172
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
173
- + 2 * input_delta * theta_one_minus_theta
174
- + input_derivatives * (1 - root).pow(2))
175
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
176
-
177
- return outputs, -logabsdet
178
- else:
179
- theta = (inputs - input_cumwidths) / input_bin_widths
180
- theta_one_minus_theta = theta * (1 - theta)
181
-
182
- numerator = input_heights * (input_delta * theta.pow(2)
183
- + input_derivatives * theta_one_minus_theta)
184
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
185
- * theta_one_minus_theta)
186
- outputs = input_cumheights + numerator / denominator
187
-
188
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
189
- + 2 * input_delta * theta_one_minus_theta
190
- + input_derivatives * (1 - theta).pow(2))
191
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
192
-
193
- return outputs, logabsdet
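
A shape-level sketch of how the spline transform above is typically invoked; the batch and channel sizes are illustrative assumptions, while the num_bins / (num_bins - 1) parameter shapes follow from the function bodies shown.

import torch
from modules.transforms import piecewise_rational_quadratic_transform

num_bins = 10
x = torch.empty(2, 192, 50).uniform_(-1.0, 1.0)      # inputs inside the tail bound
widths = torch.randn(2, 192, 50, num_bins)           # unnormalized bin widths
heights = torch.randn(2, 192, 50, num_bins)          # unnormalized bin heights
derivs = torch.randn(2, 192, 50, num_bins - 1)       # unnormalized knot derivatives (padded internally for linear tails)

y, logabsdet = piecewise_rational_quadratic_transform(
    x, widths, heights, derivs, inverse=False, tails='linear', tail_bound=1.0)
x_back, _ = piecewise_rational_quadratic_transform(
    y, widths, heights, derivs, inverse=True, tails='linear', tail_bound=1.0)
print((x - x_back).abs().max())                       # near zero: the inverse pass undoes the forward pass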
 
onnx_export.py DELETED
@@ -1,94 +0,0 @@
1
- import torch
2
- from torchaudio.models.wav2vec2.utils import import_fairseq_model
3
- from fairseq import checkpoint_utils
4
- from onnxexport.model_onnx import SynthesizerTrn
5
- import utils
6
-
7
- def get_hubert_model():
8
- vec_path = "hubert/checkpoint_best_legacy_500.pt"
9
- print("load model(s) from {}".format(vec_path))
10
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
11
- [vec_path],
12
- suffix="",
13
- )
14
- model = models[0]
15
- model.eval()
16
- return model
17
-
18
-
19
- def main(HubertExport, NetExport):
20
- path = "SoVits4.0"
21
-
22
- '''if HubertExport:
23
- device = torch.device("cpu")
24
- vec_path = "hubert/checkpoint_best_legacy_500.pt"
25
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
26
- [vec_path],
27
- suffix="",
28
- )
29
- original = models[0]
30
- original.eval()
31
- model = original
32
- test_input = torch.rand(1, 1, 16000)
33
- model(test_input)
34
- torch.onnx.export(model,
35
- test_input,
36
- "hubert4.0.onnx",
37
- export_params=True,
38
- opset_version=16,
39
- do_constant_folding=True,
40
- input_names=['source'],
41
- output_names=['embed'],
42
- dynamic_axes={
43
- 'source':
44
- {
45
- 2: "sample_length"
46
- },
47
- }
48
- )'''
49
- if NetExport:
50
- device = torch.device("cpu")
51
- hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
52
- SVCVITS = SynthesizerTrn(
53
- hps.data.filter_length // 2 + 1,
54
- hps.train.segment_size // hps.data.hop_length,
55
- **hps.model)
56
- _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None)
57
- _ = SVCVITS.eval().to(device)
58
- for i in SVCVITS.parameters():
59
- i.requires_grad = False
60
- test_hidden_unit = torch.rand(1, 10, 256)
61
- test_pitch = torch.rand(1, 10)
62
- test_mel2ph = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).unsqueeze(0)
63
- test_uv = torch.ones(1, 10, dtype=torch.float32)
64
- test_noise = torch.randn(1, 192, 10)
65
- test_sid = torch.LongTensor([0])
66
- input_names = ["c", "f0", "mel2ph", "uv", "noise", "sid"]
67
- output_names = ["audio", ]
68
- SVCVITS.eval()
69
- torch.onnx.export(SVCVITS,
70
- (
71
- test_hidden_unit.to(device),
72
- test_pitch.to(device),
73
- test_mel2ph.to(device),
74
- test_uv.to(device),
75
- test_noise.to(device),
76
- test_sid.to(device)
77
- ),
78
- f"checkpoints/{path}/model.onnx",
79
- dynamic_axes={
80
- "c": [0, 1],
81
- "f0": [1],
82
- "mel2ph": [1],
83
- "uv": [1],
84
- "noise": [2],
85
- },
86
- do_constant_folding=False,
87
- opset_version=16,
88
- verbose=False,
89
- input_names=input_names,
90
- output_names=output_names)
91
-
92
-
93
- if __name__ == '__main__':
94
- main(False, True)
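
A quick sanity check one might run on the exported graph, assuming the checkpoints/SoVits4.0/ layout this script expects; onnx itself is listed in requirements.txt.

import onnx

model = onnx.load("checkpoints/SoVits4.0/model.onnx")   # path written by main(False, True)
onnx.checker.check_model(model)                          # structural validation of the exported graph
print([inp.name for inp in model.graph.input])           # expected: c, f0, mel2ph, uv, noise, sid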
 
preprocess_flist_config.py DELETED
@@ -1,83 +0,0 @@
1
- import os
2
- import argparse
3
- import re
4
-
5
- from tqdm import tqdm
6
- from random import shuffle
7
- import json
8
- import wave
9
-
10
- config_template = json.load(open("configs/config.json"))
11
-
12
- pattern = re.compile(r'^[\.a-zA-Z0-9_\/]+$')
13
-
14
- def get_wav_duration(file_path):
15
- with wave.open(file_path, 'rb') as wav_file:
16
- # get the number of audio frames
17
- n_frames = wav_file.getnframes()
18
- # get the sample rate
19
- framerate = wav_file.getframerate()
20
- # compute the duration in seconds
21
- duration = n_frames / float(framerate)
22
- return duration
23
-
24
- if __name__ == "__main__":
25
- parser = argparse.ArgumentParser()
26
- parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list")
27
- parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list")
28
- parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list")
29
- parser.add_argument("--source_dir", type=str, default="./dataset/44k", help="path to source dir")
30
- args = parser.parse_args()
31
-
32
- train = []
33
- val = []
34
- test = []
35
- idx = 0
36
- spk_dict = {}
37
- spk_id = 0
38
- for speaker in tqdm(os.listdir(args.source_dir)):
39
- spk_dict[speaker] = spk_id
40
- spk_id += 1
41
- wavs = ["/".join([args.source_dir, speaker, i]) for i in os.listdir(os.path.join(args.source_dir, speaker))]
42
- new_wavs = []
43
- for file in wavs:
44
- if not file.endswith("wav"):
45
- continue
46
- if not pattern.match(file):
47
- print(f"warning:文件名{file}中包含非字母数字下划线,可能会导致错误。(也可能不会)")
48
- if get_wav_duration(file) < 0.3:
49
- print("skip too short audio:", file)
50
- continue
51
- new_wavs.append(file)
52
- wavs = new_wavs
53
- shuffle(wavs)
54
- train += wavs[2:-2]
55
- val += wavs[:2]
56
- test += wavs[-2:]
57
-
58
- shuffle(train)
59
- shuffle(val)
60
- shuffle(test)
61
-
62
- print("Writing", args.train_list)
63
- with open(args.train_list, "w") as f:
64
- for fname in tqdm(train):
65
- wavpath = fname
66
- f.write(wavpath + "\n")
67
-
68
- print("Writing", args.val_list)
69
- with open(args.val_list, "w") as f:
70
- for fname in tqdm(val):
71
- wavpath = fname
72
- f.write(wavpath + "\n")
73
-
74
- print("Writing", args.test_list)
75
- with open(args.test_list, "w") as f:
76
- for fname in tqdm(test):
77
- wavpath = fname
78
- f.write(wavpath + "\n")
79
-
80
- config_template["spk"] = spk_dict
81
- print("Writing configs/config.json")
82
- with open("configs/config.json", "w") as f:
83
- json.dump(config_template, f, indent=2)
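
After this script runs, the speaker-to-id map it builds from the dataset/44k/&lt;speaker&gt;/ subfolders is written into configs/config.json; a small sketch for inspecting it (the speaker names below are hypothetical).

import json

with open("configs/config.json") as f:
    config = json.load(f)
print(config["spk"])   # e.g. {"speaker_a": 0, "speaker_b": 1}, one id per dataset/44k subfolder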
 
preprocess_hubert_f0.py DELETED
@@ -1,62 +0,0 @@
1
- import math
2
- import multiprocessing
3
- import os
4
- import argparse
5
- from random import shuffle
6
-
7
- import torch
8
- from glob import glob
9
- from tqdm import tqdm
10
-
11
- import utils
12
- import logging
13
- logging.getLogger('numba').setLevel(logging.WARNING)
14
- import librosa
15
- import numpy as np
16
-
17
- hps = utils.get_hparams_from_file("configs/config.json")
18
- sampling_rate = hps.data.sampling_rate
19
- hop_length = hps.data.hop_length
20
-
21
-
22
- def process_one(filename, hmodel):
23
- # print(filename)
24
- wav, sr = librosa.load(filename, sr=sampling_rate)
25
- soft_path = filename + ".soft.pt"
26
- if not os.path.exists(soft_path):
27
- devive = torch.device("cuda" if torch.cuda.is_available() else "cpu")
28
- wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000)
29
- wav16k = torch.from_numpy(wav16k).to(devive)
30
- c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k)
31
- torch.save(c.cpu(), soft_path)
32
- f0_path = filename + ".f0.npy"
33
- if not os.path.exists(f0_path):
34
- f0 = utils.compute_f0_dio(wav, sampling_rate=sampling_rate, hop_length=hop_length)
35
- np.save(f0_path, f0)
36
-
37
-
38
- def process_batch(filenames):
39
- print("Loading hubert for content...")
40
- device = "cuda" if torch.cuda.is_available() else "cpu"
41
- hmodel = utils.get_hubert_model().to(device)
42
- print("Loaded hubert.")
43
- for filename in tqdm(filenames):
44
- process_one(filename, hmodel)
45
-
46
-
47
- if __name__ == "__main__":
48
- parser = argparse.ArgumentParser()
49
- parser.add_argument("--in_dir", type=str, default="dataset/44k", help="path to input dir")
50
-
51
- args = parser.parse_args()
52
- filenames = glob(f'{args.in_dir}/*/*.wav', recursive=True) # [:10]
53
- shuffle(filenames)
54
- multiprocessing.set_start_method('spawn',force=True)
55
-
56
- num_processes = 1
57
- chunk_size = int(math.ceil(len(filenames) / num_processes))
58
- chunks = [filenames[i:i + chunk_size] for i in range(0, len(filenames), chunk_size)]
59
- print([len(c) for c in chunks])
60
- processes = [multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks]
61
- for p in processes:
62
- p.start()
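
This script leaves two sidecar files next to every clip; a sketch for loading them back. The clip path is a hypothetical example, and the (1, 256, frames) content shape is an assumption about the final_proj output of this HuBERT checkpoint.

import numpy as np
import torch

wav_path = "dataset/44k/speaker_a/clip_0001.wav"      # hypothetical example clip
content = torch.load(wav_path + ".soft.pt")           # HuBERT content features, typically (1, 256, frames)
f0 = np.load(wav_path + ".f0.npy")                    # frame-level F0 from pyworld dio + stonemask
print(content.shape, f0.shape)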
 
requirements.txt DELETED
@@ -1,17 +0,0 @@
1
- Flask
2
- Flask_Cors
3
- gradio
4
- numpy==1.22.4
5
- pyworld==0.3.2
6
- scipy==1.7.3
7
- SoundFile==0.12.1
8
- torch==1.13.1
9
- torchaudio==0.13.1
10
- tqdm
11
- scikit-maad
12
- praat-parselmouth
13
- onnx
14
- onnxsim
15
- onnxoptimizer
16
- fairseq==0.12.2
17
- librosa==0.8.1
 
resample.py DELETED
@@ -1,48 +0,0 @@
1
- import os
2
- import argparse
3
- import librosa
4
- import numpy as np
5
- from multiprocessing import Pool, cpu_count
6
- from scipy.io import wavfile
7
- from tqdm import tqdm
8
-
9
-
10
- def process(item):
11
- spkdir, wav_name, args = item
12
- # speaker 's5', 'p280', 'p315' are excluded,
13
- speaker = spkdir.replace("\\", "/").split("/")[-1]
14
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
15
- if os.path.exists(wav_path) and '.wav' in wav_path:
16
- os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True)
17
- wav, sr = librosa.load(wav_path, sr=None)
18
- wav, _ = librosa.effects.trim(wav, top_db=20)
19
- peak = np.abs(wav).max()
20
- if peak > 1.0:
21
- wav = 0.98 * wav / peak
22
- wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2)
23
- wav2 /= max(wav2.max(), -wav2.min())
24
- save_name = wav_name
25
- save_path2 = os.path.join(args.out_dir2, speaker, save_name)
26
- wavfile.write(
27
- save_path2,
28
- args.sr2,
29
- (wav2 * np.iinfo(np.int16).max).astype(np.int16)
30
- )
31
-
32
-
33
-
34
- if __name__ == "__main__":
35
- parser = argparse.ArgumentParser()
36
- parser.add_argument("--sr2", type=int, default=44100, help="sampling rate")
37
- parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir")
38
- parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir")
39
- args = parser.parse_args()
40
- processs = cpu_count()-2 if cpu_count() >4 else 1
41
- pool = Pool(processes=processs)
42
-
43
- for speaker in os.listdir(args.in_dir):
44
- spk_dir = os.path.join(args.in_dir, speaker)
45
- if os.path.isdir(spk_dir):
46
- print(spk_dir)
47
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
48
- pass
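
A sketch verifying what each worker writes: trimmed, peak-normalized 16-bit audio at the --sr2 rate (44.1 kHz by default); the output path below is hypothetical.

from scipy.io import wavfile

sr, data = wavfile.read("dataset/44k/speaker_a/clip_0001.wav")   # hypothetical resampled clip
print(sr, data.dtype)   # expected: 44100 int16, as written by wavfile.write above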
 
spec_gen.py DELETED
@@ -1,22 +0,0 @@
1
- from data_utils import TextAudioSpeakerLoader
2
- import json
3
- from tqdm import tqdm
4
-
5
- from utils import HParams
6
-
7
- config_path = 'configs/config.json'
8
- with open(config_path, "r") as f:
9
- data = f.read()
10
- config = json.loads(data)
11
- hps = HParams(**config)
12
-
13
- train_dataset = TextAudioSpeakerLoader("filelists/train.txt", hps)
14
- test_dataset = TextAudioSpeakerLoader("filelists/test.txt", hps)
15
- eval_dataset = TextAudioSpeakerLoader("filelists/val.txt", hps)
16
-
17
- for _ in tqdm(train_dataset):
18
- pass
19
- for _ in tqdm(eval_dataset):
20
- pass
21
- for _ in tqdm(test_dataset):
22
- pass
 
train.py DELETED
@@ -1,435 +0,0 @@
1
- import os
2
- import sys
3
- import json
4
- import argparse
5
- import itertools
6
- import math
7
- import time
8
- import logging
9
-
10
- import torch
11
- from torch import nn, optim
12
- from torch.nn import functional as F
13
- from torch.utils.data import DataLoader
14
- from torch.utils.tensorboard import SummaryWriter
15
- import torch.multiprocessing as mp
16
- import torch.distributed as dist
17
- from torch.nn.parallel import DistributedDataParallel as DDP
18
- from torch.cuda.amp import autocast, GradScaler
19
-
20
- sys.path.append('../..')
21
- import modules.commons as commons
22
- import utils
23
-
24
- from data_utils import DatasetConstructor
25
-
26
- from models import (
27
- SynthesizerTrn,
28
- Discriminator
29
- )
30
-
31
- from modules.losses import (
32
- generator_loss,
33
- discriminator_loss,
34
- feature_loss,
35
- kl_loss,
36
- )
37
- from modules.mel_processing import mel_spectrogram_torch, spec_to_mel_torch, spectrogram_torch
38
-
39
- torch.backends.cudnn.benchmark = True
40
- global_step = 0
41
- use_cuda = torch.cuda.is_available()
42
- print("use_cuda, ", use_cuda)
43
-
44
- numba_logger = logging.getLogger('numba')
45
- numba_logger.setLevel(logging.WARNING)
46
-
47
-
48
- def main():
49
- """Assume Single Node Multi GPUs Training Only"""
50
-
51
- hps = utils.get_hparams()
52
- os.environ['MASTER_ADDR'] = 'localhost'
53
- os.environ['MASTER_PORT'] = str(hps.train.port)
54
-
55
- if (torch.cuda.is_available()):
56
- n_gpus = torch.cuda.device_count()
57
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
58
- else:
59
- cpurun(0, 1, hps)
60
-
61
-
62
- def run(rank, n_gpus, hps):
63
- global global_step
64
- if rank == 0:
65
- logger = utils.get_logger(hps.model_dir)
66
- logger.info(hps.train)
67
- logger.info(hps.data)
68
- logger.info(hps.model)
69
- utils.check_git_hash(hps.model_dir)
70
- writer = SummaryWriter(log_dir=hps.model_dir)
71
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
72
-
73
- dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
74
- torch.manual_seed(hps.train.seed)
75
- torch.cuda.set_device(rank)
76
- dataset_constructor = DatasetConstructor(hps, num_replicas=n_gpus, rank=rank)
77
-
78
- train_loader = dataset_constructor.get_train_loader()
79
- if rank == 0:
80
- valid_loader = dataset_constructor.get_valid_loader()
81
-
82
- net_g = SynthesizerTrn(hps).cuda(rank)
83
- net_d = Discriminator(hps, hps.model.use_spectral_norm).cuda(rank)
84
-
85
- optim_g = torch.optim.AdamW(
86
- net_g.parameters(),
87
- hps.train.learning_rate,
88
- betas=hps.train.betas,
89
- eps=hps.train.eps)
90
- optim_d = torch.optim.AdamW(
91
- net_d.parameters(),
92
- hps.train.learning_rate,
93
- betas=hps.train.betas,
94
- eps=hps.train.eps)
95
- net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
96
- net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
97
- skip_optimizer = True
98
- try:
99
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
100
- optim_g, skip_optimizer)
101
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
102
- optim_d, skip_optimizer)
103
- global_step = (epoch_str - 1) * len(train_loader)
104
- except:
105
- print("load old checkpoint failed...")
106
- epoch_str = 1
107
- global_step = 0
108
- if skip_optimizer:
109
- epoch_str = 1
110
- global_step = 0
111
-
112
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
113
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
114
-
115
- for epoch in range(epoch_str, hps.train.epochs + 1):
116
- if rank == 0:
117
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d],
118
- [train_loader, valid_loader], logger, [writer, writer_eval])
119
- else:
120
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d],
121
- [train_loader, None], None, None)
122
- scheduler_g.step()
123
- scheduler_d.step()
124
-
125
-
126
- def cpurun(rank, n_gpus, hps):
127
- global global_step
128
- if rank == 0:
129
- logger = utils.get_logger(hps.model_dir)
130
- logger.info(hps.train)
131
- logger.info(hps.data)
132
- logger.info(hps.model)
133
- utils.check_git_hash(hps.model_dir)
134
- writer = SummaryWriter(log_dir=hps.model_dir)
135
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
136
- torch.manual_seed(hps.train.seed)
137
- dataset_constructor = DatasetConstructor(hps, num_replicas=n_gpus, rank=rank)
138
-
139
- train_loader = dataset_constructor.get_train_loader()
140
- if rank == 0:
141
- valid_loader = dataset_constructor.get_valid_loader()
142
-
143
- net_g = SynthesizerTrn(hps)
144
- net_d = Discriminator(hps, hps.model.use_spectral_norm)
145
-
146
- optim_g = torch.optim.AdamW(
147
- net_g.parameters(),
148
- hps.train.learning_rate,
149
- betas=hps.train.betas,
150
- eps=hps.train.eps)
151
- optim_d = torch.optim.AdamW(
152
- net_d.parameters(),
153
- hps.train.learning_rate,
154
- betas=hps.train.betas,
155
- eps=hps.train.eps)
156
- skip_optimizer = True
157
- try:
158
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
159
- optim_g, skip_optimizer)
160
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
161
- optim_d, skip_optimizer)
162
- global_step = (epoch_str - 1) * len(train_loader)
163
- except:
164
- print("load old checkpoint failed...")
165
- epoch_str = 1
166
- global_step = 0
167
- if skip_optimizer:
168
- epoch_str = 1
169
- global_step = 0
170
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
171
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
172
-
173
- for epoch in range(epoch_str, hps.train.epochs + 1):
174
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d],
175
- [train_loader, valid_loader], logger, [writer, writer_eval])
176
-
177
- scheduler_g.step()
178
- scheduler_d.step()
179
-
180
-
181
- def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, loaders, logger, writers):
182
- net_g, net_d = nets
183
- optim_g, optim_d = optims
184
- scheduler_g, scheduler_d = schedulers
185
- train_loader, eval_loader = loaders
186
- if writers is not None:
187
- writer, writer_eval = writers
188
-
189
- train_loader.sampler.set_epoch(epoch)
190
- global global_step
191
-
192
- net_g.train()
193
- net_d.train()
194
- for batch_idx, data_dict in enumerate(train_loader):
195
-
196
- c = data_dict["c"]
197
- mel = data_dict["mel"]
198
- f0 = data_dict["f0"]
199
- uv = data_dict["uv"]
200
- wav = data_dict["wav"]
201
- spkid = data_dict["spkid"]
202
-
203
- c_lengths = data_dict["c_lengths"]
204
- mel_lengths = data_dict["mel_lengths"]
205
- wav_lengths = data_dict["wav_lengths"]
206
- f0_lengths = data_dict["f0_lengths"]
207
-
208
- # data
209
- if (use_cuda):
210
- c, c_lengths = c.cuda(rank, non_blocking=True), c_lengths.cuda(rank, non_blocking=True)
211
- mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(rank, non_blocking=True)
212
- wav, wav_lengths = wav.cuda(rank, non_blocking=True), wav_lengths.cuda(rank, non_blocking=True)
213
- f0, f0_lengths = f0.cuda(rank, non_blocking=True), f0_lengths.cuda(rank, non_blocking=True)
214
- spkid = spkid.cuda(rank, non_blocking=True)
215
- uv = uv.cuda(rank, non_blocking=True)
216
-
217
- # forward
218
- y_hat, ids_slice, LF0, y_ddsp, kl_div, predict_mel, mask, \
219
- pred_lf0, loss_f0, norm_f0 = net_g(c, c_lengths, f0,uv, mel, mel_lengths, spk_id=spkid)
220
- y_ddsp = y_ddsp.unsqueeze(1)
221
-
222
- # Discriminator
223
- y = commons.slice_segments(wav, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
224
- y_ddsp_mel = mel_spectrogram_torch(
225
- y_ddsp.squeeze(1),
226
- hps.data.n_fft,
227
- hps.data.acoustic_dim,
228
- hps.data.sampling_rate,
229
- hps.data.hop_length,
230
- hps.data.win_size,
231
- hps.data.fmin,
232
- hps.data.fmax
233
- )
234
-
235
- y_logspec = torch.log(spectrogram_torch(
236
- y.squeeze(1),
237
- hps.data.n_fft,
238
- hps.data.sampling_rate,
239
- hps.data.hop_length,
240
- hps.data.win_size
241
- ) + 1e-7)
242
-
243
- y_ddsp_logspec = torch.log(spectrogram_torch(
244
- y_ddsp.squeeze(1),
245
- hps.data.n_fft,
246
- hps.data.sampling_rate,
247
- hps.data.hop_length,
248
- hps.data.win_size
249
- ) + 1e-7)
250
-
251
- y_mel = mel_spectrogram_torch(
252
- y.squeeze(1),
253
- hps.data.n_fft,
254
- hps.data.acoustic_dim,
255
- hps.data.sampling_rate,
256
- hps.data.hop_length,
257
- hps.data.win_size,
258
- hps.data.fmin,
259
- hps.data.fmax
260
- )
261
- y_hat_mel = mel_spectrogram_torch(
262
- y_hat.squeeze(1),
263
- hps.data.n_fft,
264
- hps.data.acoustic_dim,
265
- hps.data.sampling_rate,
266
- hps.data.hop_length,
267
- hps.data.win_size,
268
- hps.data.fmin,
269
- hps.data.fmax
270
- )
271
-
272
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
273
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
274
- loss_disc_all = loss_disc
275
-
276
- optim_d.zero_grad()
277
- loss_disc_all.backward()
278
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
279
- optim_d.step()
280
-
281
- # loss
282
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
283
-
284
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * 45
285
- loss_mel_dsp = F.l1_loss(y_mel, y_ddsp_mel) * 45
286
- loss_spec_dsp = F.l1_loss(y_logspec, y_ddsp_logspec) * 45
287
-
288
- loss_mel_am = F.mse_loss(mel * mask, predict_mel * mask) # * 10
289
-
290
- loss_fm = feature_loss(fmap_r, fmap_g)
291
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
292
-
293
- loss_fm = loss_fm / 2
294
- loss_gen = loss_gen / 2
295
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_mel_dsp + kl_div + loss_mel_am + loss_spec_dsp +\
296
- loss_f0
297
-
298
- loss_gen_all = loss_gen_all / hps.train.accumulation_steps
299
-
300
- loss_gen_all.backward()
301
- if ((global_step + 1) % hps.train.accumulation_steps == 0):
302
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
303
- optim_g.step()
304
- optim_g.zero_grad()
305
-
306
- if rank == 0:
307
- if (global_step + 1) % (hps.train.accumulation_steps * 10) == 0:
308
- print(["step&time&loss", global_step, time.asctime(time.localtime(time.time())), loss_gen_all])
309
-
310
- if global_step % hps.train.log_interval == 0:
311
- lr = optim_g.param_groups[0]['lr']
312
- losses = [loss_gen_all, loss_mel]
313
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
314
- epoch,
315
- 100. * batch_idx / len(train_loader)))
316
- logger.info([x.item() for x in losses] + [global_step, lr])
317
-
318
- scalar_dict = {"loss/total": loss_gen_all,
319
- "loss/mel": loss_mel,
320
- "loss/adv": loss_gen,
321
- "loss/fm": loss_fm,
322
- "loss/mel_ddsp": loss_mel_dsp,
323
- "loss/spec_ddsp": loss_spec_dsp,
324
- "loss/mel_am": loss_mel_am,
325
- "loss/kl_div": kl_div,
326
- "loss/lf0": loss_f0,
327
- "learning_rate": lr}
328
- image_dict = {
329
- "train/lf0": utils.plot_data_to_numpy(LF0[0,0, :].cpu().numpy(), pred_lf0[0,0, :].detach().cpu().numpy()),
330
- "train/norm_lf0": utils.plot_data_to_numpy(LF0[0,0, :].cpu().numpy(), norm_f0[0,0, :].detach().cpu().numpy()),
331
- }
332
- utils.summarize(
333
- writer=writer,
334
- global_step=global_step,
335
- scalars=scalar_dict,
336
- images=image_dict)
337
-
338
- if global_step % hps.train.eval_interval == 0:
339
- # logger.info(['All training params(G): ', utils.count_parameters(net_g), ' M'])
340
- # print('Sub training params(G): ', \
341
- # 'text_encoder: ', utils.count_parameters(net_g.module.text_encoder), ' M, ', \
342
- # 'decoder: ', utils.count_parameters(net_g.module.decoder), ' M, ', \
343
- # 'mel_decoder: ', utils.count_parameters(net_g.module.mel_decoder), ' M, ', \
344
- # 'dec: ', utils.count_parameters(net_g.module.dec), ' M, ', \
345
- # 'dec_harm: ', utils.count_parameters(net_g.module.dec_harm), ' M, ', \
346
- # 'dec_noise: ', utils.count_parameters(net_g.module.dec_noise), ' M, ', \
347
- # 'posterior: ', utils.count_parameters(net_g.module.posterior_encoder), ' M, ', \
348
- # )
349
-
350
- evaluate(hps, net_g, eval_loader, writer_eval)
351
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
352
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
353
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
354
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
355
- keep_ckpts = getattr(hps.train, 'keep_ckpts', 0)
356
- if keep_ckpts > 0:
357
- utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)
358
-
359
- net_g.train()
360
- global_step += 1
361
-
362
- if rank == 0:
363
- logger.info('====> Epoch: {}'.format(epoch))
364
-
365
-
366
- def evaluate(hps, generator, eval_loader, writer_eval):
367
- generator.eval()
368
- image_dict = {}
369
- audio_dict = {}
370
- with torch.no_grad():
371
- for batch_idx, data_dict in enumerate(eval_loader):
372
- if batch_idx == 8:
373
- break
374
- c = data_dict["c"]
375
- mel = data_dict["mel"]
376
- f0 = data_dict["f0"]
377
- uv = data_dict["uv"]
378
- wav = data_dict["wav"]
379
- spkid = data_dict["spkid"]
380
-
381
- wav_lengths = data_dict["wav_lengths"]
382
-
383
- # data
384
- if (use_cuda):
385
- c = c.cuda(0)
386
- wav = wav.cuda(0)
387
- mel = mel.cuda(0)
388
- f0 = f0.cuda(0)
389
- uv = uv.cuda(0)
390
- spkid = spkid.cuda(0)
391
- # remove else
392
- c = c[:1]
393
- wav = wav[:1]
394
- mel = mel[:1]
395
- f0 = f0[:1]
396
- spkid = spkid[:1]
397
- if use_cuda:
398
- y_hat, y_harm, y_noise, _ = generator.module.infer(c, f0=f0,uv=uv, g=spkid)
399
- else:
400
- y_hat, y_harm, y_noise, _ = generator.infer(c, f0=f0,uv=uv, g=spkid)
401
-
402
- y_hat_mel = mel_spectrogram_torch(
403
- y_hat.squeeze(1),
404
- hps.data.n_fft,
405
- hps.data.acoustic_dim,
406
- hps.data.sampling_rate,
407
- hps.data.hop_length,
408
- hps.data.win_size,
409
- hps.data.fmin,
410
- hps.data.fmax
411
- )
412
- image_dict.update({
413
- f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()),
414
- })
415
- audio_dict.update( {
416
- f"gen/audio_{batch_idx}": y_hat[0, :, :],
417
- f"gen/harm": y_harm[0, :, :],
418
- "gen/noise": y_noise[0, :, :]
419
- })
420
- # if global_step == 0:
421
- image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
422
- audio_dict.update({f"gt/audio_{batch_idx}": wav[0, :, :wav_lengths[0]]})
423
-
424
- utils.summarize(
425
- writer=writer_eval,
426
- global_step=global_step,
427
- images=image_dict,
428
- audios=audio_dict,
429
- audio_sampling_rate=hps.data.sampling_rate
430
- )
431
- generator.train()
432
-
433
-
434
- if __name__ == "__main__":
435
- main()
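
Checkpoints are written by utils.save_checkpoint as G_&lt;step&gt;.pth / D_&lt;step&gt;.pth under the model directory; a sketch for inspecting one, with an assumed checkpoint path.

import torch

ckpt = torch.load("logs/44k/G_0.pth", map_location="cpu")   # assumed checkpoint path
print(sorted(ckpt.keys()))       # ['iteration', 'learning_rate', 'model', 'optimizer']
print(ckpt["iteration"], ckpt["learning_rate"])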
 
utils.py DELETED
@@ -1,517 +0,0 @@
1
- import os
2
- import glob
3
- import re
4
- import sys
5
- import argparse
6
- import logging
7
- import json
8
- import subprocess
9
- import random
10
-
11
- import librosa
12
- import numpy as np
13
- from scipy.io.wavfile import read
14
- import torch
15
- from torch.nn import functional as F
16
- from modules.commons import sequence_mask
17
- from hubert import hubert_model
18
- MATPLOTLIB_FLAG = False
19
-
20
- logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
21
- logger = logging
22
-
23
- f0_bin = 256
24
- f0_max = 1100.0
25
- f0_min = 50.0
26
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
27
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
28
-
29
-
30
- # def normalize_f0(f0, random_scale=True):
31
- # f0_norm = f0.clone() # create a copy of the input Tensor
32
- # batch_size, _, frame_length = f0_norm.shape
33
- # for i in range(batch_size):
34
- # means = torch.mean(f0_norm[i, 0, :])
35
- # if random_scale:
36
- # factor = random.uniform(0.8, 1.2)
37
- # else:
38
- # factor = 1
39
- # f0_norm[i, 0, :] = (f0_norm[i, 0, :] - means) * factor
40
- # return f0_norm
41
- # def normalize_f0(f0, random_scale=True):
42
- # means = torch.mean(f0[:, 0, :], dim=1, keepdim=True)
43
- # if random_scale:
44
- # factor = torch.Tensor(f0.shape[0],1).uniform_(0.8, 1.2).to(f0.device)
45
- # else:
46
- # factor = torch.ones(f0.shape[0], 1, 1).to(f0.device)
47
- # f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
48
- # return f0_norm
49
- def normalize_f0(f0, x_mask, uv, random_scale=True):
50
- # calculate means based on x_mask
51
- uv_sum = torch.sum(uv, dim=1, keepdim=True)
52
- uv_sum[uv_sum == 0] = 9999
53
- means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum
54
-
55
- if random_scale:
56
- factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
57
- else:
58
- factor = torch.ones(f0.shape[0], 1).to(f0.device)
59
- # normalize f0 based on means and factor
60
- f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
61
- if torch.isnan(f0_norm).any():
62
- exit(0)
63
- return f0_norm * x_mask
64
-
65
-
66
- def plot_data_to_numpy(x, y):
67
- global MATPLOTLIB_FLAG
68
- if not MATPLOTLIB_FLAG:
69
- import matplotlib
70
- matplotlib.use("Agg")
71
- MATPLOTLIB_FLAG = True
72
- mpl_logger = logging.getLogger('matplotlib')
73
- mpl_logger.setLevel(logging.WARNING)
74
- import matplotlib.pylab as plt
75
- import numpy as np
76
-
77
- fig, ax = plt.subplots(figsize=(10, 2))
78
- plt.plot(x)
79
- plt.plot(y)
80
- plt.tight_layout()
81
-
82
- fig.canvas.draw()
83
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
84
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
85
- plt.close()
86
- return data
87
-
88
-
89
-
90
- def interpolate_f0(f0):
91
- '''
92
- Interpolate the F0 contour (fill in unvoiced frames)
93
- '''
94
-
95
- data = np.reshape(f0, (f0.size, 1))
96
-
97
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
98
- vuv_vector[data > 0.0] = 1.0
99
- vuv_vector[data <= 0.0] = 0.0
100
-
101
- ip_data = data
102
-
103
- frame_number = data.size
104
- last_value = 0.0
105
- for i in range(frame_number):
106
- if data[i] <= 0.0:
107
- j = i + 1
108
- for j in range(i + 1, frame_number):
109
- if data[j] > 0.0:
110
- break
111
- if j < frame_number - 1:
112
- if last_value > 0.0:
113
- step = (data[j] - data[i - 1]) / float(j - i)
114
- for k in range(i, j):
115
- ip_data[k] = data[i - 1] + step * (k - i + 1)
116
- else:
117
- for k in range(i, j):
118
- ip_data[k] = data[j]
119
- else:
120
- for k in range(i, frame_number):
121
- ip_data[k] = last_value
122
- else:
123
- ip_data[i] = data[i]
124
- last_value = data[i]
125
-
126
- return ip_data[:,0], vuv_vector[:,0]
127
-
128
-
129
- def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
130
- import parselmouth
131
- x = wav_numpy
132
- if p_len is None:
133
- p_len = x.shape[0]//hop_length
134
- else:
135
- assert abs(p_len-x.shape[0]//hop_length) < 4, "pad length error"
136
- time_step = hop_length / sampling_rate * 1000
137
- f0_min = 50
138
- f0_max = 1100
139
- f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac(
140
- time_step=time_step / 1000, voicing_threshold=0.6,
141
- pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
142
-
143
- pad_size=(p_len - len(f0) + 1) // 2
144
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
145
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
146
- return f0
147
-
148
- def resize_f0(x, target_len):
149
- source = np.array(x)
150
- source[source<0.001] = np.nan
151
- target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source)
152
- res = np.nan_to_num(target)
153
- return res
154
-
155
- def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
156
- import pyworld
157
- if p_len is None:
158
- p_len = wav_numpy.shape[0]//hop_length
159
- f0, t = pyworld.dio(
160
- wav_numpy.astype(np.double),
161
- fs=sampling_rate,
162
- f0_ceil=800,
163
- frame_period=1000 * hop_length / sampling_rate,
164
- )
165
- f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate)
166
- for index, pitch in enumerate(f0):
167
- f0[index] = round(pitch, 1)
168
- return resize_f0(f0, p_len)
169
-
170
- def f0_to_coarse(f0):
171
- is_torch = isinstance(f0, torch.Tensor)
172
- f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
173
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
174
-
175
- f0_mel[f0_mel <= 1] = 1
176
- f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
177
- f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int)
178
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
179
- return f0_coarse
180
-
181
-
182
- def get_hubert_model():
183
- vec_path = "hubert/checkpoint_best_legacy_500.pt"
184
- print("load model(s) from {}".format(vec_path))
185
- from fairseq import checkpoint_utils
186
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
187
- [vec_path],
188
- suffix="",
189
- )
190
- model = models[0]
191
- model.eval()
192
- return model
193
-
194
- def get_hubert_content(hmodel, wav_16k_tensor):
195
- feats = wav_16k_tensor
196
- if feats.dim() == 2: # double channels
197
- feats = feats.mean(-1)
198
- assert feats.dim() == 1, feats.dim()
199
- feats = feats.view(1, -1)
200
- padding_mask = torch.BoolTensor(feats.shape).fill_(False)
201
- inputs = {
202
- "source": feats.to(wav_16k_tensor.device),
203
- "padding_mask": padding_mask.to(wav_16k_tensor.device),
204
- "output_layer": 9, # layer 9
205
- }
206
- with torch.no_grad():
207
- logits = hmodel.extract_features(**inputs)
208
- feats = hmodel.final_proj(logits[0])
209
- return feats.transpose(1, 2)
210
-
211
-
212
- def get_content(cmodel, y):
213
- with torch.no_grad():
214
- c = cmodel.extract_features(y.squeeze(1))[0]
215
- c = c.transpose(1, 2)
216
- return c
217
-
218
-
219
-
220
- def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
221
- assert os.path.isfile(checkpoint_path)
222
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
223
- iteration = checkpoint_dict['iteration']
224
- learning_rate = checkpoint_dict['learning_rate']
225
- if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
226
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
227
- saved_state_dict = checkpoint_dict['model']
228
- if hasattr(model, 'module'):
229
- state_dict = model.module.state_dict()
230
- else:
231
- state_dict = model.state_dict()
232
- new_state_dict = {}
233
- for k, v in state_dict.items():
234
- try:
235
- # assert "dec" in k or "disc" in k
236
- # print("load", k)
237
- new_state_dict[k] = saved_state_dict[k]
238
- assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
239
- except:
240
- print("error, %s is not in the checkpoint" % k)
241
- logger.info("%s is not in the checkpoint" % k)
242
- new_state_dict[k] = v
243
- if hasattr(model, 'module'):
244
- model.module.load_state_dict(new_state_dict)
245
- else:
246
- model.load_state_dict(new_state_dict)
247
- print("load ")
248
- logger.info("Loaded checkpoint '{}' (iteration {})".format(
249
- checkpoint_path, iteration))
250
- return model, optimizer, learning_rate, iteration
251
-
252
-
253
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
254
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
255
- iteration, checkpoint_path))
256
- if hasattr(model, 'module'):
257
- state_dict = model.module.state_dict()
258
- else:
259
- state_dict = model.state_dict()
260
- torch.save({'model': state_dict,
261
- 'iteration': iteration,
262
- 'optimizer': optimizer.state_dict(),
263
- 'learning_rate': learning_rate}, checkpoint_path)
264
-
265
- def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
266
- """Freeing up space by deleting saved ckpts
267
-
268
- Arguments:
269
- path_to_models -- Path to the model directory
270
- n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
271
- sort_by_time -- True -> chronologically delete ckpts
272
- False -> lexicographically delete ckpts
273
- """
274
- ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
275
- name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1)))
276
- time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
277
- sort_key = time_key if sort_by_time else name_key
278
- x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], key=sort_key)
279
- to_del = [os.path.join(path_to_models, fn) for fn in
280
- (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
281
- del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
282
- del_routine = lambda x: [os.remove(x), del_info(x)]
283
- rs = [del_routine(fn) for fn in to_del]
284
-
285
- def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
286
- for k, v in scalars.items():
287
- writer.add_scalar(k, v, global_step)
288
- for k, v in histograms.items():
289
- writer.add_histogram(k, v, global_step)
290
- for k, v in images.items():
291
- writer.add_image(k, v, global_step, dataformats='HWC')
292
- for k, v in audios.items():
293
- writer.add_audio(k, v, global_step, audio_sampling_rate)
294
-
295
-
296
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
297
- f_list = glob.glob(os.path.join(dir_path, regex))
298
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
299
- x = f_list[-1]
300
- print(x)
301
- return x
302
-
303
-
304
- def plot_spectrogram_to_numpy(spectrogram):
305
- global MATPLOTLIB_FLAG
306
- if not MATPLOTLIB_FLAG:
307
- import matplotlib
308
- matplotlib.use("Agg")
309
- MATPLOTLIB_FLAG = True
310
- mpl_logger = logging.getLogger('matplotlib')
311
- mpl_logger.setLevel(logging.WARNING)
312
- import matplotlib.pylab as plt
313
- import numpy as np
314
-
315
- fig, ax = plt.subplots(figsize=(10,2))
316
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
317
- interpolation='none')
318
- plt.colorbar(im, ax=ax)
319
- plt.xlabel("Frames")
320
- plt.ylabel("Channels")
321
- plt.tight_layout()
322
-
323
- fig.canvas.draw()
324
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
325
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
326
- plt.close()
327
- return data
328
-
329
-
330
- def plot_alignment_to_numpy(alignment, info=None):
331
- global MATPLOTLIB_FLAG
332
- if not MATPLOTLIB_FLAG:
333
- import matplotlib
334
- matplotlib.use("Agg")
335
- MATPLOTLIB_FLAG = True
336
- mpl_logger = logging.getLogger('matplotlib')
337
- mpl_logger.setLevel(logging.WARNING)
338
- import matplotlib.pylab as plt
339
- import numpy as np
340
-
341
- fig, ax = plt.subplots(figsize=(6, 4))
342
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
343
- interpolation='none')
344
- fig.colorbar(im, ax=ax)
345
- xlabel = 'Decoder timestep'
346
- if info is not None:
347
- xlabel += '\n\n' + info
348
- plt.xlabel(xlabel)
349
- plt.ylabel('Encoder timestep')
350
- plt.tight_layout()
351
-
352
- fig.canvas.draw()
353
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
354
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
355
- plt.close()
356
- return data
357
-
358
-
359
- def load_wav_to_torch(full_path):
360
- sampling_rate, data = read(full_path)
361
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
362
-
363
-
364
- def load_filepaths_and_text(filename, split="|"):
365
- with open(filename, encoding='utf-8') as f:
366
- filepaths_and_text = [line.strip().split(split) for line in f]
367
- return filepaths_and_text
368
-
369
-
370
- def get_hparams(init=True):
371
- parser = argparse.ArgumentParser()
372
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
373
- help='JSON file for configuration')
374
- parser.add_argument('-m', '--model', type=str, required=True,
375
- help='Model name')
376
-
377
- args = parser.parse_args()
378
- model_dir = os.path.join("./logs", args.model)
379
-
380
- if not os.path.exists(model_dir):
381
- os.makedirs(model_dir)
382
-
383
- config_path = args.config
384
- config_save_path = os.path.join(model_dir, "config.json")
385
- if init:
386
- with open(config_path, "r") as f:
387
- data = f.read()
388
- with open(config_save_path, "w") as f:
389
- f.write(data)
390
- else:
391
- with open(config_save_path, "r") as f:
392
- data = f.read()
393
- config = json.loads(data)
394
-
395
- hparams = HParams(**config)
396
- hparams.model_dir = model_dir
397
- return hparams
398
-
399
-
400
- def get_hparams_from_dir(model_dir):
401
- config_save_path = os.path.join(model_dir, "config.json")
402
- with open(config_save_path, "r") as f:
403
- data = f.read()
404
- config = json.loads(data)
405
-
406
- hparams =HParams(**config)
407
- hparams.model_dir = model_dir
408
- return hparams
409
-
410
-
411
- def get_hparams_from_file(config_path):
412
- with open(config_path, "r") as f:
413
- data = f.read()
414
- config = json.loads(data)
415
-
416
- hparams =HParams(**config)
417
- return hparams
418
-
419
-
420
- def check_git_hash(model_dir):
421
- source_dir = os.path.dirname(os.path.realpath(__file__))
422
- if not os.path.exists(os.path.join(source_dir, ".git")):
423
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
424
- source_dir
425
- ))
426
- return
427
-
428
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
429
-
430
- path = os.path.join(model_dir, "githash")
431
- if os.path.exists(path):
432
- saved_hash = open(path).read()
433
- if saved_hash != cur_hash:
434
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
435
- saved_hash[:8], cur_hash[:8]))
436
- else:
437
- open(path, "w").write(cur_hash)
438
-
439
-
440
- def get_logger(model_dir, filename="train.log"):
441
- global logger
442
- logger = logging.getLogger(os.path.basename(model_dir))
443
- logger.setLevel(logging.DEBUG)
444
-
445
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
446
- if not os.path.exists(model_dir):
447
- os.makedirs(model_dir)
448
- h = logging.FileHandler(os.path.join(model_dir, filename))
449
- h.setLevel(logging.DEBUG)
450
- h.setFormatter(formatter)
451
- logger.addHandler(h)
452
- return logger
453
-
454
-
455
- def repeat_expand_2d(content, target_len):
456
- # content : [h, t]
457
-
458
- src_len = content.shape[-1]
459
- target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
460
- temp = torch.arange(src_len+1) * target_len / src_len
461
- current_pos = 0
462
- for i in range(target_len):
463
- if i < temp[current_pos+1]:
464
- target[:, i] = content[:, current_pos]
465
- else:
466
- current_pos += 1
467
- target[:, i] = content[:, current_pos]
468
-
469
- return target
470
-
471
- def load_wav(wav_path, raw_sr, target_sr=16000, win_size=800, hop_size=200):
472
- audio = librosa.core.load(wav_path, sr=raw_sr)[0]
473
- if raw_sr != target_sr:
474
- audio = librosa.core.resample(audio,
475
- raw_sr,
476
- target_sr,
477
- res_type='kaiser_best')
478
- target_length = (audio.size // hop_size +
479
- win_size // hop_size) * hop_size
480
- pad_len = (target_length - audio.size) // 2
481
- if audio.size % 2 == 0:
482
- audio = np.pad(audio, (pad_len, pad_len), mode='reflect')
483
- else:
484
- audio = np.pad(audio, (pad_len, pad_len + 1), mode='reflect')
485
- return audio
486
-
487
- class HParams():
488
- def __init__(self, **kwargs):
489
- for k, v in kwargs.items():
490
- if type(v) == dict:
491
- v = HParams(**v)
492
- self[k] = v
493
-
494
- def keys(self):
495
- return self.__dict__.keys()
496
-
497
- def items(self):
498
- return self.__dict__.items()
499
-
500
- def values(self):
501
- return self.__dict__.values()
502
-
503
- def __len__(self):
504
- return len(self.__dict__)
505
-
506
- def __getitem__(self, key):
507
- return getattr(self, key)
508
-
509
- def __setitem__(self, key, value):
510
- return setattr(self, key, value)
511
-
512
- def __contains__(self, key):
513
- return key in self.__dict__
514
-
515
- def __repr__(self):
516
- return self.__dict__.__repr__()
517
-
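
A short sketch chaining the F0 helpers defined above; the input clip path is a hypothetical example.

import librosa
import utils

wav, sr = librosa.load("dataset/44k/speaker_a/clip_0001.wav", sr=44100)   # hypothetical clip
f0 = utils.compute_f0_dio(wav, sampling_rate=sr, hop_length=512)          # raw F0, zeros on unvoiced frames
f0_interp, uv = utils.interpolate_f0(f0)                                  # filled contour + voiced/unvoiced mask
coarse = utils.f0_to_coarse(f0_interp)                                    # 1..255 buckets for the pitch embedding
print(f0.shape, uv.sum(), coarse.min(), coarse.max())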