XiaoHei Studio committed
Commit abc4e5e
1 parent: c82bb46

Upload 29 files
diffusion/__init__.py ADDED
Empty file
diffusion/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (159 Bytes)
diffusion/__pycache__/data_loaders.cpython-38.pyc ADDED
Binary file (5.7 kB)
diffusion/__pycache__/diffusion.cpython-38.pyc ADDED
Binary file (11.4 kB)
diffusion/__pycache__/dpm_solver_pytorch.cpython-38.pyc ADDED
Binary file (50.5 kB)
diffusion/__pycache__/solver.cpython-38.pyc ADDED
Binary file (4.05 kB)
diffusion/__pycache__/unit2mel.cpython-38.pyc ADDED
Binary file (4.93 kB)
diffusion/__pycache__/vocoder.cpython-38.pyc ADDED
Binary file (3.53 kB)
diffusion/__pycache__/wavenet.cpython-38.pyc ADDED
Binary file (3.87 kB)
diffusion/data_loaders.py ADDED
@@ -0,0 +1,288 @@
import os
import random

import librosa
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import tqdm

from utils import repeat_expand_2d


def traverse_dir(
        root_dir,
        extensions,
        amount=None,
        str_include=None,
        str_exclude=None,
        is_pure=False,
        is_sort=False,
        is_ext=True):

    file_list = []
    cnt = 0
    for root, _, files in os.walk(root_dir):
        for file in files:
            if any([file.endswith(f".{ext}") for ext in extensions]):
                # path
                mix_path = os.path.join(root, file)
                pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path

                # amount
                if (amount is not None) and (cnt == amount):
                    if is_sort:
                        file_list.sort()
                    return file_list

                # check string
                if (str_include is not None) and (str_include not in pure_path):
                    continue
                if (str_exclude is not None) and (str_exclude in pure_path):
                    continue

                if not is_ext:
                    ext = pure_path.split('.')[-1]
                    pure_path = pure_path[:-(len(ext)+1)]
                file_list.append(pure_path)
                cnt += 1
    if is_sort:
        file_list.sort()
    return file_list

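# Example (editor's sketch, not part of the commit): collect up to 100 sorted
# audio paths under a root directory, relative to it and with extensions
# stripped:
#
#     files = traverse_dir('dataset_raw', ['wav', 'flac'], amount=100,
#                          is_pure=True, is_sort=True, is_ext=False)
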
def get_data_loaders(args, whole_audio=False):
    data_train = AudioDataset(
        filelists=args.data.training_files,
        waveform_sec=args.data.duration,
        hop_size=args.data.block_size,
        sample_rate=args.data.sampling_rate,
        load_all_data=args.train.cache_all_data,
        whole_audio=whole_audio,
        extensions=args.data.extensions,
        n_spk=args.model.n_spk,
        spk=args.spk,
        device=args.train.cache_device,
        fp16=args.train.cache_fp16,
        unit_interpolate_mode=args.data.unit_interpolate_mode,
        use_aug=True)
    loader_train = torch.utils.data.DataLoader(
        data_train,
        batch_size=args.train.batch_size if not whole_audio else 1,
        shuffle=True,
        num_workers=args.train.num_workers if args.train.cache_device == 'cpu' else 0,
        persistent_workers=(args.train.num_workers > 0) if args.train.cache_device == 'cpu' else False,
        pin_memory=True if args.train.cache_device == 'cpu' else False
    )
    data_valid = AudioDataset(
        filelists=args.data.validation_files,
        waveform_sec=args.data.duration,
        hop_size=args.data.block_size,
        sample_rate=args.data.sampling_rate,
        load_all_data=args.train.cache_all_data,
        whole_audio=True,
        spk=args.spk,
        extensions=args.data.extensions,
        unit_interpolate_mode=args.data.unit_interpolate_mode,
        n_spk=args.model.n_spk)
    loader_valid = torch.utils.data.DataLoader(
        data_valid,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        pin_memory=True
    )
    return loader_train, loader_valid


class AudioDataset(Dataset):
    def __init__(
        self,
        filelists,
        waveform_sec,
        hop_size,
        sample_rate,
        spk,
        load_all_data=True,
        whole_audio=False,
        extensions=['wav'],
        n_spk=1,
        device='cpu',
        fp16=False,
        use_aug=False,
        unit_interpolate_mode='left'
    ):
        super().__init__()

        self.waveform_sec = waveform_sec
        self.sample_rate = sample_rate
        self.hop_size = hop_size
        self.filelists = filelists
        self.whole_audio = whole_audio
        self.use_aug = use_aug
        self.data_buffer = {}
        self.pitch_aug_dict = {}
        self.unit_interpolate_mode = unit_interpolate_mode
        # np.load(os.path.join(self.path_root, 'pitch_aug_dict.npy'), allow_pickle=True).item()
        if load_all_data:
            print('Load all the data filelists:', filelists)
        else:
            print('Load the f0, volume data filelists:', filelists)
        with open(filelists, "r") as f:
            self.paths = f.read().splitlines()
        for name_ext in tqdm(self.paths, total=len(self.paths)):
            path_audio = name_ext
            duration = librosa.get_duration(filename=path_audio, sr=self.sample_rate)

            path_f0 = name_ext + ".f0.npy"
            f0, _ = np.load(path_f0, allow_pickle=True)
            f0 = torch.from_numpy(np.array(f0, dtype=float)).float().unsqueeze(-1).to(device)

            path_volume = name_ext + ".vol.npy"
            volume = np.load(path_volume)
            volume = torch.from_numpy(volume).float().unsqueeze(-1).to(device)

            path_augvol = name_ext + ".aug_vol.npy"
            aug_vol = np.load(path_augvol)
            aug_vol = torch.from_numpy(aug_vol).float().unsqueeze(-1).to(device)

            if n_spk is not None and n_spk > 1:
                spk_name = name_ext.split("/")[-2]
                spk_id = spk[spk_name] if spk_name in spk else 0
                if spk_id < 0 or spk_id >= n_spk:
                    raise ValueError(' [x] Multi-speaker training error: spk_id must be an integer from 0 to n_spk-1 ')
            else:
                spk_id = 0
            spk_id = torch.LongTensor(np.array([spk_id])).to(device)

            if load_all_data:
                '''
                audio, sr = librosa.load(path_audio, sr=self.sample_rate)
                if len(audio.shape) > 1:
                    audio = librosa.to_mono(audio)
                audio = torch.from_numpy(audio).to(device)
                '''
                path_mel = name_ext + ".mel.npy"
                mel = np.load(path_mel)
                mel = torch.from_numpy(mel).to(device)

                path_augmel = name_ext + ".aug_mel.npy"
                aug_mel, keyshift = np.load(path_augmel, allow_pickle=True)
                aug_mel = np.array(aug_mel, dtype=float)
                aug_mel = torch.from_numpy(aug_mel).to(device)
                self.pitch_aug_dict[name_ext] = keyshift

                path_units = name_ext + ".soft.pt"
                units = torch.load(path_units).to(device)
                units = units[0]
                units = repeat_expand_2d(units, f0.size(0), unit_interpolate_mode).transpose(0, 1)

                if fp16:
                    mel = mel.half()
                    aug_mel = aug_mel.half()
                    units = units.half()

                self.data_buffer[name_ext] = {
                    'duration': duration,
                    'mel': mel,
                    'aug_mel': aug_mel,
                    'units': units,
                    'f0': f0,
                    'volume': volume,
                    'aug_vol': aug_vol,
                    'spk_id': spk_id
                }
            else:
                # only the keyshift is needed up front; the mel itself is loaded
                # lazily in get_data
                path_augmel = name_ext + ".aug_mel.npy"
                aug_mel, keyshift = np.load(path_augmel, allow_pickle=True)
                self.pitch_aug_dict[name_ext] = keyshift
                self.data_buffer[name_ext] = {
                    'duration': duration,
                    'f0': f0,
                    'volume': volume,
                    'aug_vol': aug_vol,
                    'spk_id': spk_id
                }

    def __getitem__(self, file_idx):
        name_ext = self.paths[file_idx]
        data_buffer = self.data_buffer[name_ext]
        # check duration. if too short, then skip
        if data_buffer['duration'] < (self.waveform_sec + 0.1):
            return self.__getitem__((file_idx + 1) % len(self.paths))

        # get item
        return self.get_data(name_ext, data_buffer)

    def get_data(self, name_ext, data_buffer):
        name = os.path.splitext(name_ext)[0]
        frame_resolution = self.hop_size / self.sample_rate
        duration = data_buffer['duration']
        waveform_sec = duration if self.whole_audio else self.waveform_sec

        # load audio
        idx_from = 0 if self.whole_audio else random.uniform(0, duration - waveform_sec - 0.1)
        start_frame = int(idx_from / frame_resolution)
        units_frame_len = int(waveform_sec / frame_resolution)
        aug_flag = random.choice([True, False]) and self.use_aug
        '''
        audio = data_buffer.get('audio')
        if audio is None:
            path_audio = os.path.join(self.path_root, 'audio', name) + '.wav'
            audio, sr = librosa.load(
                path_audio,
                sr=self.sample_rate,
                offset=start_frame * frame_resolution,
                duration=waveform_sec)
            if len(audio.shape) > 1:
                audio = librosa.to_mono(audio)
            # clip audio into N seconds
            audio = audio[: audio.shape[-1] // self.hop_size * self.hop_size]
            audio = torch.from_numpy(audio).float()
        else:
            audio = audio[start_frame * self.hop_size: (start_frame + units_frame_len) * self.hop_size]
        '''
        # load mel
        mel_key = 'aug_mel' if aug_flag else 'mel'
        mel = data_buffer.get(mel_key)
        if mel is None:
            path_mel = name_ext + ".mel.npy"
            mel = np.load(path_mel)
            mel = mel[start_frame: start_frame + units_frame_len]
            mel = torch.from_numpy(mel).float()
        else:
            mel = mel[start_frame: start_frame + units_frame_len]

        # load f0
        f0 = data_buffer.get('f0')
        aug_shift = 0
        if aug_flag:
            aug_shift = self.pitch_aug_dict[name_ext]
        f0_frames = 2 ** (aug_shift / 12) * f0[start_frame: start_frame + units_frame_len]

        # load units
        units = data_buffer.get('units')
        if units is None:
            path_units = name_ext + ".soft.pt"
            units = torch.load(path_units)
            units = units[0]
            units = repeat_expand_2d(units, f0.size(0), self.unit_interpolate_mode).transpose(0, 1)

        units = units[start_frame: start_frame + units_frame_len]

        # load volume
        vol_key = 'aug_vol' if aug_flag else 'volume'
        volume = data_buffer.get(vol_key)
        volume_frames = volume[start_frame: start_frame + units_frame_len]

        # load spk_id
        spk_id = data_buffer.get('spk_id')

        # load shift
        aug_shift = torch.from_numpy(np.array([[aug_shift]])).float()

        return dict(mel=mel, f0=f0_frames, volume=volume_frames, units=units, spk_id=spk_id, aug_shift=aug_shift, name=name, name_ext=name_ext)

    def __len__(self):
        return len(self.paths)
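
A minimal smoke test for get_data_loaders, sketched under the assumption that the config object exposes the attribute layout the code above reads (args.data.*, args.train.*, args.model.n_spk, args.spk) and that the preprocessed side files (.f0.npy, .vol.npy, .aug_vol.npy, .aug_mel.npy, .soft.pt, plus .mel.npy when caching) sit next to each audio path listed in the filelists. All paths and hyperparameter values below are illustrative placeholders, not settings from this commit:

from types import SimpleNamespace

args = SimpleNamespace(
    data=SimpleNamespace(
        training_files='filelists/train.txt',
        validation_files='filelists/val.txt',
        duration=2.0,                 # crop length in seconds
        block_size=512,               # hop size in samples
        sampling_rate=44100,
        extensions=['wav'],
        unit_interpolate_mode='left',
    ),
    train=SimpleNamespace(
        cache_all_data=False,
        cache_device='cpu',
        cache_fp16=False,
        batch_size=8,
        num_workers=2,
    ),
    model=SimpleNamespace(n_spk=1),
    spk={'speaker0': 0},
)

loader_train, loader_valid = get_data_loaders(args)
batch = next(iter(loader_train))
# each batch carries aligned frame-level features plus metadata
print(batch['mel'].shape, batch['f0'].shape, batch['units'].shape)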
diffusion/diffusion.py ADDED
@@ -0,0 +1,396 @@
from collections import deque
from functools import partial
from inspect import isfunction

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def extract(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


def noise_like(shape, device, repeat=False):
    def repeat_noise():
        return torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))

    def noise():
        return torch.randn(shape, device=device)

    return repeat_noise() if repeat else noise()


def linear_beta_schedule(timesteps, max_beta=0.02):
    """
    linear schedule
    """
    betas = np.linspace(1e-4, max_beta, timesteps)
    return betas


def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    steps = timesteps + 1
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return np.clip(betas, a_min=0, a_max=0.999)


beta_schedule = {
    "cosine": cosine_beta_schedule,
    "linear": linear_beta_schedule,
}

+ class GaussianDiffusion(nn.Module):
64
+ def __init__(self,
65
+ denoise_fn,
66
+ out_dims=128,
67
+ timesteps=1000,
68
+ k_step=1000,
69
+ max_beta=0.02,
70
+ spec_min=-12,
71
+ spec_max=2):
72
+
73
+ super().__init__()
74
+ self.denoise_fn = denoise_fn
75
+ self.out_dims = out_dims
76
+ betas = beta_schedule['linear'](timesteps, max_beta=max_beta)
77
+
78
+ alphas = 1. - betas
79
+ alphas_cumprod = np.cumprod(alphas, axis=0)
80
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
81
+
82
+ timesteps, = betas.shape
83
+ self.num_timesteps = int(timesteps)
84
+ self.k_step = k_step if k_step>0 and k_step<timesteps else timesteps
85
+
86
+ self.noise_list = deque(maxlen=4)
87
+
88
+ to_torch = partial(torch.tensor, dtype=torch.float32)
89
+
90
+ self.register_buffer('betas', to_torch(betas))
91
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
92
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
93
+
94
+ # calculations for diffusion q(x_t | x_{t-1}) and others
95
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
96
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
97
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
98
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
99
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
100
+
101
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
102
+ posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
103
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
104
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
105
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
106
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
107
+ self.register_buffer('posterior_mean_coef1', to_torch(
108
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
109
+ self.register_buffer('posterior_mean_coef2', to_torch(
110
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
111
+
112
+ self.register_buffer('spec_min', torch.FloatTensor([spec_min])[None, None, :out_dims])
113
+ self.register_buffer('spec_max', torch.FloatTensor([spec_max])[None, None, :out_dims])
114
+
115
+ def q_mean_variance(self, x_start, t):
116
+ mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
117
+ variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
118
+ log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
119
+ return mean, variance, log_variance
120
+
121
+ def predict_start_from_noise(self, x_t, t, noise):
122
+ return (
123
+ extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
124
+ extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
125
+ )
126
+
127
+ def q_posterior(self, x_start, x_t, t):
128
+ posterior_mean = (
129
+ extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
130
+ extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
131
+ )
132
+ posterior_variance = extract(self.posterior_variance, t, x_t.shape)
133
+ posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
134
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
135
+
136
+ def p_mean_variance(self, x, t, cond):
137
+ noise_pred = self.denoise_fn(x, t, cond=cond)
138
+ x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
139
+
140
+ x_recon.clamp_(-1., 1.)
141
+
142
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
143
+ return model_mean, posterior_variance, posterior_log_variance
144
+
145
+ @torch.no_grad()
146
+ def p_sample_ddim(self, x, t, interval, cond):
147
+ """
148
+ Use the DDIM method from
149
+ """
150
+ a_t = extract(self.alphas_cumprod, t, x.shape)
151
+ a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t)), x.shape)
152
+
153
+ noise_pred = self.denoise_fn(x, t, cond=cond)
154
+ x_prev = a_prev.sqrt() * (x / a_t.sqrt() + (((1 - a_prev) / a_prev).sqrt()-((1 - a_t) / a_t).sqrt()) * noise_pred)
155
+ return x_prev
156
+
157
+ @torch.no_grad()
158
+ def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
159
+ b, *_, device = *x.shape, x.device
160
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond)
161
+ noise = noise_like(x.shape, device, repeat_noise)
162
+ # no noise when t == 0
163
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
164
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
165
+
166
+ @torch.no_grad()
167
+ def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False):
168
+ """
169
+ Use the PLMS method from
170
+ [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778).
171
+ """
172
+
173
+ def get_x_pred(x, noise_t, t):
174
+ a_t = extract(self.alphas_cumprod, t, x.shape)
175
+ a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t)), x.shape)
176
+ a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
177
+
178
+ x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (
179
+ a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
180
+ x_pred = x + x_delta
181
+
182
+ return x_pred
183
+
184
+ noise_list = self.noise_list
185
+ noise_pred = self.denoise_fn(x, t, cond=cond)
186
+
187
+ if len(noise_list) == 0:
188
+ x_pred = get_x_pred(x, noise_pred, t)
189
+ noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond)
190
+ noise_pred_prime = (noise_pred + noise_pred_prev) / 2
191
+ elif len(noise_list) == 1:
192
+ noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
193
+ elif len(noise_list) == 2:
194
+ noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12
195
+ else:
196
+ noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24
197
+
198
+ x_prev = get_x_pred(x, noise_pred_prime, t)
199
+ noise_list.append(noise_pred)
200
+
201
+ return x_prev
202
+
203
+ def q_sample(self, x_start, t, noise=None):
204
+ noise = default(noise, lambda: torch.randn_like(x_start))
205
+ return (
206
+ extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
207
+ extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
208
+ )
209
+
210
+ def p_losses(self, x_start, t, cond, noise=None, loss_type='l2'):
211
+ noise = default(noise, lambda: torch.randn_like(x_start))
212
+
213
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
214
+ x_recon = self.denoise_fn(x_noisy, t, cond)
215
+
216
+ if loss_type == 'l1':
217
+ loss = (noise - x_recon).abs().mean()
218
+ elif loss_type == 'l2':
219
+ loss = F.mse_loss(noise, x_recon)
220
+ else:
221
+ raise NotImplementedError()
222
+
223
+ return loss
224
+
225
+ def forward(self,
226
+ condition,
227
+ gt_spec=None,
228
+ infer=True,
229
+ infer_speedup=10,
230
+ method='dpm-solver',
231
+ k_step=300,
232
+ use_tqdm=True):
233
+ """
234
+ conditioning diffusion, use fastspeech2 encoder output as the condition
235
+ """
236
+ cond = condition.transpose(1, 2)
237
+ b, device = condition.shape[0], condition.device
238
+
239
+ if not infer:
240
+ spec = self.norm_spec(gt_spec)
241
+ t = torch.randint(0, self.k_step, (b,), device=device).long()
242
+ norm_spec = spec.transpose(1, 2)[:, None, :, :] # [B, 1, M, T]
243
+ return self.p_losses(norm_spec, t, cond=cond)
244
+ else:
245
+ shape = (cond.shape[0], 1, self.out_dims, cond.shape[2])
246
+
247
+ if gt_spec is None:
248
+ t = self.k_step
249
+ x = torch.randn(shape, device=device)
250
+ else:
251
+ t = k_step
252
+ norm_spec = self.norm_spec(gt_spec)
253
+ norm_spec = norm_spec.transpose(1, 2)[:, None, :, :]
254
+ x = self.q_sample(x_start=norm_spec, t=torch.tensor([t - 1], device=device).long())
255
+
256
+ if method is not None and infer_speedup > 1:
257
+ if method == 'dpm-solver' or method == 'dpm-solver++':
258
+ from .dpm_solver_pytorch import (
259
+ DPM_Solver,
260
+ NoiseScheduleVP,
261
+ model_wrapper,
262
+ )
263
+ # 1. Define the noise schedule.
264
+ noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t])
265
+
266
+ # 2. Convert your discrete-time `model` to the continuous-time
267
+ # noise prediction model. Here is an example for a diffusion model
268
+ # `model` with the noise prediction type ("noise") .
269
+ def my_wrapper(fn):
270
+ def wrapped(x, t, **kwargs):
271
+ ret = fn(x, t, **kwargs)
272
+ if use_tqdm:
273
+ self.bar.update(1)
274
+ return ret
275
+
276
+ return wrapped
277
+
278
+ model_fn = model_wrapper(
279
+ my_wrapper(self.denoise_fn),
280
+ noise_schedule,
281
+ model_type="noise", # or "x_start" or "v" or "score"
282
+ model_kwargs={"cond": cond}
283
+ )
284
+
285
+ # 3. Define dpm-solver and sample by singlestep DPM-Solver.
286
+ # (We recommend singlestep DPM-Solver for unconditional sampling)
287
+ # You can adjust the `steps` to balance the computation
288
+ # costs and the sample quality.
289
+ if method == 'dpm-solver':
290
+ dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver")
291
+ elif method == 'dpm-solver++':
292
+ dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
293
+
294
+ steps = t // infer_speedup
295
+ if use_tqdm:
296
+ self.bar = tqdm(desc="sample time step", total=steps)
297
+ x = dpm_solver.sample(
298
+ x,
299
+ steps=steps,
300
+ order=2,
301
+ skip_type="time_uniform",
302
+ method="multistep",
303
+ )
304
+ if use_tqdm:
305
+ self.bar.close()
306
+ elif method == 'pndm':
307
+ self.noise_list = deque(maxlen=4)
308
+ if use_tqdm:
309
+ for i in tqdm(
310
+ reversed(range(0, t, infer_speedup)), desc='sample time step',
311
+ total=t // infer_speedup,
312
+ ):
313
+ x = self.p_sample_plms(
314
+ x, torch.full((b,), i, device=device, dtype=torch.long),
315
+ infer_speedup, cond=cond
316
+ )
317
+ else:
318
+ for i in reversed(range(0, t, infer_speedup)):
319
+ x = self.p_sample_plms(
320
+ x, torch.full((b,), i, device=device, dtype=torch.long),
321
+ infer_speedup, cond=cond
322
+ )
323
+ elif method == 'ddim':
324
+ if use_tqdm:
325
+ for i in tqdm(
326
+ reversed(range(0, t, infer_speedup)), desc='sample time step',
327
+ total=t // infer_speedup,
328
+ ):
329
+ x = self.p_sample_ddim(
330
+ x, torch.full((b,), i, device=device, dtype=torch.long),
331
+ infer_speedup, cond=cond
332
+ )
333
+ else:
334
+ for i in reversed(range(0, t, infer_speedup)):
335
+ x = self.p_sample_ddim(
336
+ x, torch.full((b,), i, device=device, dtype=torch.long),
337
+ infer_speedup, cond=cond
338
+ )
339
+ elif method == 'unipc':
340
+ from .uni_pc import NoiseScheduleVP, UniPC, model_wrapper
341
+ # 1. Define the noise schedule.
342
+ noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t])
343
+
344
+ # 2. Convert your discrete-time `model` to the continuous-time
345
+ # noise prediction model. Here is an example for a diffusion model
346
+ # `model` with the noise prediction type ("noise") .
347
+ def my_wrapper(fn):
348
+ def wrapped(x, t, **kwargs):
349
+ ret = fn(x, t, **kwargs)
350
+ if use_tqdm:
351
+ self.bar.update(1)
352
+ return ret
353
+
354
+ return wrapped
355
+
356
+ model_fn = model_wrapper(
357
+ my_wrapper(self.denoise_fn),
358
+ noise_schedule,
359
+ model_type="noise", # or "x_start" or "v" or "score"
360
+ model_kwargs={"cond": cond}
361
+ )
362
+
363
+ # 3. Define uni_pc and sample by multistep UniPC.
364
+ # You can adjust the `steps` to balance the computation
365
+ # costs and the sample quality.
366
+ uni_pc = UniPC(model_fn, noise_schedule, variant='bh2')
367
+
368
+ steps = t // infer_speedup
369
+ if use_tqdm:
370
+ self.bar = tqdm(desc="sample time step", total=steps)
371
+ x = uni_pc.sample(
372
+ x,
373
+ steps=steps,
374
+ order=2,
375
+ skip_type="time_uniform",
376
+ method="multistep",
377
+ )
378
+ if use_tqdm:
379
+ self.bar.close()
380
+ else:
381
+ raise NotImplementedError(method)
382
+ else:
383
+ if use_tqdm:
384
+ for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
385
+ x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
386
+ else:
387
+ for i in reversed(range(0, t)):
388
+ x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
389
+ x = x.squeeze(1).transpose(1, 2) # [B, T, M]
390
+ return self.denorm_spec(x)
391
+
392
+ def norm_spec(self, x):
393
+ return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
394
+
395
+ def denorm_spec(self, x):
396
+ return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
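
A hedged usage sketch for GaussianDiffusion (editor's example, not from the commit): the denoiser this package appears to pair with is the WaveNet in diffusion/wavenet.py (its .pyc is part of this upload); DummyDenoiser below is only a stand-in with the same (x, t, cond) call signature, and all shapes and values are illustrative:

import torch

class DummyDenoiser(torch.nn.Module):
    def forward(self, x, t, cond=None):
        # a real denoiser predicts the added noise eps from the [B, 1, M, T] input
        return torch.zeros_like(x)

model = GaussianDiffusion(DummyDenoiser(), out_dims=128, timesteps=1000, k_step=1000)

cond = torch.randn(1, 100, 256)    # [B, T, n_hidden] encoder output
gt_mel = torch.randn(1, 100, 128)  # [B, T, M] ground-truth mel

loss = model(cond, gt_spec=gt_mel, infer=False)  # training: scalar diffusion loss
mel = model(cond, infer=True, infer_speedup=10,  # inference: [B, T, M] mel
            method='pndm', use_tqdm=False)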
diffusion/diffusion_onnx.py ADDED
@@ -0,0 +1,614 @@
import math
from collections import deque
from functools import partial
from inspect import isfunction

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Conv1d, Mish
from tqdm import tqdm


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def extract(a, t, x_shape=None):
    # x_shape is unused here; it is accepted so the legacy methods copied from
    # diffusion.py (q_sample, q_posterior, ...) can call this with three
    # arguments. The ONNX path always reshapes to a (1, 1, 1, 1) scalar.
    return a[t].reshape((1, 1, 1, 1))


def noise_like(shape, device, repeat=False):
    def repeat_noise():
        return torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))

    def noise():
        return torch.randn(shape, device=device)

    return repeat_noise() if repeat else noise()


def linear_beta_schedule(timesteps, max_beta=0.02):
    """
    linear schedule
    """
    betas = np.linspace(1e-4, max_beta, timesteps)
    return betas


def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    steps = timesteps + 1
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return np.clip(betas, a_min=0, a_max=0.999)


beta_schedule = {
    "cosine": cosine_beta_schedule,
    "linear": linear_beta_schedule,
}


def extract_1(a, t):
    return a[t].reshape((1, 1, 1, 1))


def predict_stage0(noise_pred, noise_pred_prev):
    return (noise_pred + noise_pred_prev) / 2


def predict_stage1(noise_pred, noise_list):
    return (noise_pred * 3
            - noise_list[-1]) / 2


def predict_stage2(noise_pred, noise_list):
    return (noise_pred * 23
            - noise_list[-1] * 16
            + noise_list[-2] * 5) / 12


def predict_stage3(noise_pred, noise_list):
    return (noise_pred * 55
            - noise_list[-1] * 59
            + noise_list[-2] * 37
            - noise_list[-3] * 9) / 24


class SinusoidalPosEmb(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        self.half_dim = dim // 2
        self.emb = 9.21034037 / (self.half_dim - 1)  # 9.21034037 = ln(10000)
        self.emb = torch.exp(torch.arange(self.half_dim) * torch.tensor(-self.emb)).unsqueeze(0)
        self.emb = self.emb.cpu()

    def forward(self, x):
        emb = self.emb * x
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb


class ResidualBlock(nn.Module):
    def __init__(self, encoder_hidden, residual_channels, dilation):
        super().__init__()
        self.residual_channels = residual_channels
        self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
        self.diffusion_projection = nn.Linear(residual_channels, residual_channels)
        self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)

    def forward(self, x, conditioner, diffusion_step):
        diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
        conditioner = self.conditioner_projection(conditioner)
        y = x + diffusion_step
        y = self.dilated_conv(y) + conditioner

        gate, filter_1 = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)

        y = torch.sigmoid(gate) * torch.tanh(filter_1)
        y = self.output_projection(y)

        residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)

        return (x + residual) / 1.41421356, skip  # 1.41421356 = sqrt(2)


class DiffNet(nn.Module):
    def __init__(self, in_dims, n_layers, n_chans, n_hidden):
        super().__init__()
        self.encoder_hidden = n_hidden
        self.residual_channels = n_chans
        self.input_projection = Conv1d(in_dims, self.residual_channels, 1)
        self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels)
        dim = self.residual_channels
        self.mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            Mish(),
            nn.Linear(dim * 4, dim)
        )
        self.residual_layers = nn.ModuleList([
            ResidualBlock(self.encoder_hidden, self.residual_channels, 1)
            for _ in range(n_layers)
        ])
        self.skip_projection = Conv1d(self.residual_channels, self.residual_channels, 1)
        self.output_projection = Conv1d(self.residual_channels, in_dims, 1)
        nn.init.zeros_(self.output_projection.weight)

    def forward(self, spec, diffusion_step, cond):
        x = spec.squeeze(0)
        x = self.input_projection(x)  # x [B, residual_channel, T]
        x = F.relu(x)
        # skip = torch.randn_like(x)
        diffusion_step = diffusion_step.float()
        diffusion_step = self.diffusion_embedding(diffusion_step)
        diffusion_step = self.mlp(diffusion_step)

        x, skip = self.residual_layers[0](x, cond, diffusion_step)
        # noinspection PyTypeChecker
        for layer in self.residual_layers[1:]:
            x, skip_connection = layer.forward(x, cond, diffusion_step)
            skip = skip + skip_connection
        x = skip / math.sqrt(len(self.residual_layers))
        x = self.skip_projection(x)
        x = F.relu(x)
        x = self.output_projection(x)  # [B, 80, T]
        return x.unsqueeze(1)


class AfterDiffusion(nn.Module):
    def __init__(self, spec_max, spec_min, v_type='a'):
        super().__init__()
        self.spec_max = spec_max
        self.spec_min = spec_min
        self.type = v_type

    def forward(self, x):
        x = x.squeeze(1).permute(0, 2, 1)
        mel_out = (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
        if self.type == 'nsf-hifigan-log10':
            mel_out = mel_out * 0.434294  # 0.434294 = 1 / ln(10): natural-log mel to log10 mel
        return mel_out.transpose(2, 1)


class Pred(nn.Module):
    def __init__(self, alphas_cumprod):
        super().__init__()
        self.alphas_cumprod = alphas_cumprod

    def forward(self, x_1, noise_t, t_1, t_prev):
        a_t = extract(self.alphas_cumprod, t_1).cpu()
        a_prev = extract(self.alphas_cumprod, t_prev).cpu()
        a_t_sq, a_prev_sq = a_t.sqrt().cpu(), a_prev.sqrt().cpu()
        x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / (
                a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
        x_pred = x_1 + x_delta.cpu()

        return x_pred


class GaussianDiffusion(nn.Module):
    def __init__(self,
                 out_dims=128,
                 n_layers=20,
                 n_chans=384,
                 n_hidden=256,
                 timesteps=1000,
                 k_step=1000,
                 max_beta=0.02,
                 spec_min=-12,
                 spec_max=2):
        super().__init__()
        self.denoise_fn = DiffNet(out_dims, n_layers, n_chans, n_hidden)
        self.out_dims = out_dims
        self.mel_bins = out_dims
        self.n_hidden = n_hidden
        betas = beta_schedule['linear'](timesteps, max_beta=max_beta)

        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.k_step = k_step

        self.noise_list = deque(maxlen=4)

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        self.register_buffer('spec_min', torch.FloatTensor([spec_min])[None, None, :out_dims])
        self.register_buffer('spec_max', torch.FloatTensor([spec_max])[None, None, :out_dims])
        self.ad = AfterDiffusion(self.spec_max, self.spec_min)
        self.xp = Pred(self.alphas_cumprod)

    def q_mean_variance(self, x_start, t):
        mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, cond):
        noise_pred = self.denoise_fn(x, t, cond=cond)
        x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)

        x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False):
        """
        Use the PLMS method from
        [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778).
        """

        def get_x_pred(x, noise_t, t):
            a_t = extract(self.alphas_cumprod, t)
            a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t)))
            a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()

            x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (
                    a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
            x_pred = x + x_delta

            return x_pred

        noise_list = self.noise_list
        noise_pred = self.denoise_fn(x, t, cond=cond)

        if len(noise_list) == 0:
            x_pred = get_x_pred(x, noise_pred, t)
            noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond)
            noise_pred_prime = (noise_pred + noise_pred_prev) / 2
        elif len(noise_list) == 1:
            noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
        elif len(noise_list) == 2:
            noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12
        else:
            noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24

        x_prev = get_x_pred(x, noise_pred_prime, t)
        noise_list.append(noise_pred)

        return x_prev

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (
            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )

    def p_losses(self, x_start, t, cond, noise=None, loss_type='l2'):
        noise = default(noise, lambda: torch.randn_like(x_start))

        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        x_recon = self.denoise_fn(x_noisy, t, cond)

        if loss_type == 'l1':
            loss = (noise - x_recon).abs().mean()
        elif loss_type == 'l2':
            loss = F.mse_loss(noise, x_recon)
        else:
            raise NotImplementedError()

        return loss

    def org_forward(self,
                    condition,
                    init_noise=None,
                    gt_spec=None,
                    infer=True,
                    infer_speedup=100,
                    method='pndm',
                    k_step=1000,
                    use_tqdm=True):
        """
        Conditional diffusion: uses the FastSpeech2 encoder output as the condition.
        """
        cond = condition
        b, device = condition.shape[0], condition.device
        if not infer:
            spec = self.norm_spec(gt_spec)
            t = torch.randint(0, self.k_step, (b,), device=device).long()
            norm_spec = spec.transpose(1, 2)[:, None, :, :]  # [B, 1, M, T]
            return self.p_losses(norm_spec, t, cond=cond)
        else:
            shape = (cond.shape[0], 1, self.out_dims, cond.shape[2])

            if gt_spec is None:
                t = self.k_step
                if init_noise is None:
                    x = torch.randn(shape, device=device)
                else:
                    x = init_noise
            else:
                t = k_step
                norm_spec = self.norm_spec(gt_spec)
                norm_spec = norm_spec.transpose(1, 2)[:, None, :, :]
                x = self.q_sample(x_start=norm_spec, t=torch.tensor([t - 1], device=device).long())

            if method is not None and infer_speedup > 1:
                if method == 'dpm-solver':
                    from .dpm_solver_pytorch import (
                        DPM_Solver,
                        NoiseScheduleVP,
                        model_wrapper,
                    )
                    # 1. Define the noise schedule.
                    noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t])

                    # 2. Convert your discrete-time `model` to the continuous-time
                    # noise prediction model. Here is an example for a diffusion model
                    # `model` with the noise prediction type ("noise").
                    def my_wrapper(fn):
                        def wrapped(x, t, **kwargs):
                            ret = fn(x, t, **kwargs)
                            if use_tqdm:
                                self.bar.update(1)
                            return ret

                        return wrapped

                    model_fn = model_wrapper(
                        my_wrapper(self.denoise_fn),
                        noise_schedule,
                        model_type="noise",  # or "x_start" or "v" or "score"
                        model_kwargs={"cond": cond}
                    )

                    # 3. Define dpm-solver and sample by singlestep DPM-Solver.
                    # (We recommend singlestep DPM-Solver for unconditional sampling)
                    # You can adjust the `steps` to balance the computation
                    # costs and the sample quality.
                    dpm_solver = DPM_Solver(model_fn, noise_schedule)

                    steps = t // infer_speedup
                    if use_tqdm:
                        self.bar = tqdm(desc="sample time step", total=steps)
                    x = dpm_solver.sample(
                        x,
                        steps=steps,
                        order=3,
                        skip_type="time_uniform",
                        method="singlestep",
                    )
                    if use_tqdm:
                        self.bar.close()
                elif method == 'pndm':
                    self.noise_list = deque(maxlen=4)
                    if use_tqdm:
                        for i in tqdm(
                                reversed(range(0, t, infer_speedup)), desc='sample time step',
                                total=t // infer_speedup,
                        ):
                            x = self.p_sample_plms(
                                x, torch.full((b,), i, device=device, dtype=torch.long),
                                infer_speedup, cond=cond
                            )
                    else:
                        for i in reversed(range(0, t, infer_speedup)):
                            x = self.p_sample_plms(
                                x, torch.full((b,), i, device=device, dtype=torch.long),
                                infer_speedup, cond=cond
                            )
                else:
                    raise NotImplementedError(method)
            else:
                if use_tqdm:
                    for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
                        x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
                else:
                    for i in reversed(range(0, t)):
                        x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
            x = x.squeeze(1).transpose(1, 2)  # [B, T, M]
            return self.denorm_spec(x).transpose(2, 1)

    def norm_spec(self, x):
        return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1

    def denorm_spec(self, x):
        return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min

    def get_x_pred(self, x_1, noise_t, t_1, t_prev):
        a_t = extract(self.alphas_cumprod, t_1)
        a_prev = extract(self.alphas_cumprod, t_prev)
        a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
        x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / (
                a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
        x_pred = x_1 + x_delta
        return x_pred

    def OnnxExport(self, project_name=None, init_noise=None, hidden_channels=256, export_denoise=True, export_pred=True, export_after=True):
        cond = torch.randn([1, self.n_hidden, 10]).cpu()
        if init_noise is None:
            x = torch.randn((1, 1, self.mel_bins, cond.shape[2]), dtype=torch.float32).cpu()
        else:
            x = init_noise
        pndms = 100

        org_y_x = self.org_forward(cond, init_noise=x)

        device = cond.device
        n_frames = cond.shape[2]
        step_range = torch.arange(0, self.k_step, pndms, dtype=torch.long, device=device).flip(0)
        plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device)
        noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device)

        ot = step_range[0]
        ot_1 = torch.full((1,), ot, device=device, dtype=torch.long)
        if export_denoise:
            torch.onnx.export(
                self.denoise_fn,
                (x.cpu(), ot_1.cpu(), cond.cpu()),
                f"{project_name}_denoise.onnx",
                input_names=["noise", "time", "condition"],
                output_names=["noise_pred"],
                dynamic_axes={
                    "noise": [3],
                    "condition": [2]
                },
                opset_version=16
            )

        for t in step_range:
            t_1 = torch.full((1,), t, device=device, dtype=torch.long)
            noise_pred = self.denoise_fn(x, t_1, cond)
            t_prev = t_1 - pndms
            t_prev = t_prev * (t_prev > 0)
            if plms_noise_stage == 0:
                if export_pred:
                    torch.onnx.export(
                        self.xp,
                        (x.cpu(), noise_pred.cpu(), t_1.cpu(), t_prev.cpu()),
                        f"{project_name}_pred.onnx",
                        input_names=["noise", "noise_pred", "time", "time_prev"],
                        output_names=["noise_pred_o"],
                        dynamic_axes={
                            "noise": [3],
                            "noise_pred": [3]
                        },
                        opset_version=16
                    )

                x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev)
                noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond)
                noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev)

            elif plms_noise_stage == 1:
                noise_pred_prime = predict_stage1(noise_pred, noise_list)

            elif plms_noise_stage == 2:
                noise_pred_prime = predict_stage2(noise_pred, noise_list)

            else:
                noise_pred_prime = predict_stage3(noise_pred, noise_list)

            noise_pred = noise_pred.unsqueeze(0)

            if plms_noise_stage < 3:
                noise_list = torch.cat((noise_list, noise_pred), dim=0)
                plms_noise_stage = plms_noise_stage + 1

            else:
                noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0)

            x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev)
        if export_after:
            torch.onnx.export(
                self.ad,
                x.cpu(),
                f"{project_name}_after.onnx",
                input_names=["x"],
                output_names=["mel_out"],
                dynamic_axes={
                    "x": [3]
                },
                opset_version=16
            )
        x = self.ad(x)

        # sanity check: the unrolled export path should reproduce org_forward exactly
        print((x == org_y_x).all())
        return x

    def forward(self, condition=None, init_noise=None, pndms=None, k_step=None):
        cond = condition
        x = init_noise

        device = cond.device
        n_frames = cond.shape[2]
        step_range = torch.arange(0, k_step.item(), pndms.item(), dtype=torch.long, device=device).flip(0)
        plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device)
        noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device)

        for t in step_range:
            t_1 = torch.full((1,), t, device=device, dtype=torch.long)
            noise_pred = self.denoise_fn(x, t_1, cond)
            t_prev = t_1 - pndms
            t_prev = t_prev * (t_prev > 0)
            if plms_noise_stage == 0:
                x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev)
                noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond)
                noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev)

            elif plms_noise_stage == 1:
                noise_pred_prime = predict_stage1(noise_pred, noise_list)

            elif plms_noise_stage == 2:
                noise_pred_prime = predict_stage2(noise_pred, noise_list)

            else:
                noise_pred_prime = predict_stage3(noise_pred, noise_list)

            noise_pred = noise_pred.unsqueeze(0)

            if plms_noise_stage < 3:
                noise_list = torch.cat((noise_list, noise_pred), dim=0)
                plms_noise_stage = plms_noise_stage + 1

            else:
                noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0)

            x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev)
        x = self.ad(x)
        return x
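
A sketch of driving the exporter above (editor's example, not from the commit); in practice trained weights would be loaded first, and the three graphs land in files following the f"{project_name}_*.onnx" pattern used by OnnxExport:

model = GaussianDiffusion(out_dims=128, n_layers=20, n_chans=384, n_hidden=256)
# model.load_state_dict(torch.load('model.pt'))  # placeholder: load real weights here
model.eval()
model.OnnxExport(project_name="my_model",
                 export_denoise=True, export_pred=True, export_after=True)
# writes my_model_denoise.onnx, my_model_pred.onnx and my_model_after.onnx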
diffusion/dpm_solver_pytorch.py ADDED
@@ -0,0 +1,1307 @@
1
+ import torch
2
+
3
+
4
+ class NoiseScheduleVP:
5
+ def __init__(
6
+ self,
7
+ schedule='discrete',
8
+ betas=None,
9
+ alphas_cumprod=None,
10
+ continuous_beta_0=0.1,
11
+ continuous_beta_1=20.,
12
+ dtype=torch.float32,
13
+ ):
14
+ """Create a wrapper class for the forward SDE (VP type).
15
+
16
+ ***
17
+ Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.
18
+ We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
19
+ ***
20
+
21
+ The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
22
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
23
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
24
+
25
+ log_alpha_t = self.marginal_log_mean_coeff(t)
26
+ sigma_t = self.marginal_std(t)
27
+ lambda_t = self.marginal_lambda(t)
28
+
29
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
30
+
31
+ t = self.inverse_lambda(lambda_t)
32
+
33
+ ===============================================================
34
+
35
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
36
+
37
+ 1. For discrete-time DPMs:
38
+
39
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
40
+ t_i = (i + 1) / N
41
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
42
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
43
+
44
+ Args:
45
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
46
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
47
+
48
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
49
+
50
+ **Important**: Please pay special attention for the args for `alphas_cumprod`:
51
+ The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
52
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
53
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
54
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
55
+ and
56
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
57
+
58
+
59
+ 2. For continuous-time DPMs:
60
+
61
+ We support the linear VPSDE for the continuous time setting. The hyperparameters for the noise
62
+ schedule are the default settings in Yang Song's ScoreSDE:
63
+
64
+ Args:
65
+ beta_min: A `float` number. The smallest beta for the linear schedule.
66
+ beta_max: A `float` number. The largest beta for the linear schedule.
67
+ T: A `float` number. The ending time of the forward process.
68
+
69
+ ===============================================================
70
+
71
+ Args:
72
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
73
+ 'linear' for continuous-time DPMs.
74
+ Returns:
75
+ A wrapper object of the forward SDE (VP type).
76
+
77
+ ===============================================================
78
+
79
+ Example:
80
+
81
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
82
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
83
+
84
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
85
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
86
+
87
+ # For continuous-time DPMs (VPSDE), linear schedule:
88
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
89
+
90
+ """
91
+
92
+ if schedule not in ['discrete', 'linear']:
93
+ raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear'".format(schedule))
94
+
95
+ self.schedule = schedule
96
+ if schedule == 'discrete':
97
+ if betas is not None:
98
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
99
+ else:
100
+ assert alphas_cumprod is not None
101
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
102
+ self.T = 1.
103
+ self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, -1,)).to(dtype=dtype)
104
+ self.total_N = self.log_alpha_array.shape[1]
105
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)
106
+ else:
107
+ self.T = 1.
108
+ self.total_N = 1000
109
+ self.beta_0 = continuous_beta_0
110
+ self.beta_1 = continuous_beta_1
111
+
112
+ def numerical_clip_alpha(self, log_alphas, clipped_lambda=-5.1):
113
+ """
114
+ For some beta schedules, such as the cosine schedule, the log-SNR has numerical issues.
115
+ We clip the log-SNR near t = T so that it stays above -5.1, which ensures numerical stability.
116
+ Such a trick is very useful for diffusion models with the cosine schedule, such as i-DDPM, guided-diffusion and GLIDE.
117
+ """
118
+ log_sigmas = 0.5 * torch.log(1. - torch.exp(2. * log_alphas))
119
+ lambs = log_alphas - log_sigmas
120
+ idx = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)
121
+ if idx > 0:
122
+ log_alphas = log_alphas[:-idx]
123
+ return log_alphas
124
+
125
+ def marginal_log_mean_coeff(self, t):
126
+ """
127
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
128
+ """
129
+ if self.schedule == 'discrete':
130
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
131
+ elif self.schedule == 'linear':
132
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
133
+
134
+ def marginal_alpha(self, t):
135
+ """
136
+ Compute alpha_t of a given continuous-time label t in [0, T].
137
+ """
138
+ return torch.exp(self.marginal_log_mean_coeff(t))
139
+
140
+ def marginal_std(self, t):
141
+ """
142
+ Compute sigma_t of a given continuous-time label t in [0, T].
143
+ """
144
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
145
+
146
+ def marginal_lambda(self, t):
147
+ """
148
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
149
+ """
150
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
151
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
152
+ return log_mean_coeff - log_std
153
+
154
+ def inverse_lambda(self, lamb):
155
+ """
156
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
157
+ """
158
+ if self.schedule == 'linear':
159
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
160
+ Delta = self.beta_0**2 + tmp
161
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
162
+ elif self.schedule == 'discrete':
163
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
164
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
165
+ return t.reshape((-1,))
166
+
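A quick round-trip sketch for the linear schedule (assumes `torch` is imported and the class above is in scope): `inverse_lambda` should undo `marginal_lambda` up to floating-point error.

```
ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
t = torch.tensor([0.25, 0.5, 0.75])
lam = ns.marginal_lambda(t)      # half-logSNR at each t
t_rec = ns.inverse_lambda(lam)   # closed-form inverse for the linear schedule
assert torch.allclose(t, t_rec, atol=1e-4)
```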
167
+
168
+ def model_wrapper(
169
+ model,
170
+ noise_schedule,
171
+ model_type="noise",
172
+ model_kwargs={},
173
+ guidance_type="uncond",
174
+ condition=None,
175
+ unconditional_condition=None,
176
+ guidance_scale=1.,
177
+ classifier_fn=None,
178
+ classifier_kwargs={},
179
+ ):
180
+ """Create a wrapper function for the noise prediction model.
181
+
182
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
183
+ first wrap the model function into a noise prediction model that accepts the continuous time as input.
184
+
185
+ We support four types of the diffusion model by setting `model_type`:
186
+
187
+ 1. "noise": noise prediction model. (Trained by predicting noise).
188
+
189
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
190
+
191
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
192
+ The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2].
193
+
194
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
195
+ arXiv preprint arXiv:2202.00512 (2022).
196
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
197
+ arXiv preprint arXiv:2210.02303 (2022).
198
+
199
+ 4. "score": marginal score function. (Trained by denoising score matching).
200
+ Note that the score function and the noise prediction model follow a simple relationship:
201
+ ```
202
+ noise(x_t, t) = -sigma_t * score(x_t, t)
203
+ ```
204
+
205
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
206
+ 1. "uncond": unconditional sampling by DPMs.
207
+ The input `model` has the following format:
208
+ ``
209
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
210
+ ``
211
+
212
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
213
+ The input `model` has the following format:
214
+ ``
215
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
216
+ ``
217
+
218
+ The input `classifier_fn` has the following format:
219
+ ``
220
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
221
+ ``
222
+
223
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
224
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
225
+
226
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
227
+ The input `model` has the following format:
228
+ ``
229
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
230
+ ``
231
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
232
+
233
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
234
+ arXiv preprint arXiv:2207.12598 (2022).
235
+
236
+
237
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
238
+ or continuous-time labels (i.e. epsilon to T).
239
+
240
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
241
+ ``
242
+ def model_fn(x, t_continuous) -> noise:
243
+ t_input = get_model_input_time(t_continuous)
244
+ return noise_pred(model, x, t_input, **model_kwargs)
245
+ ``
246
+ where `t_continuous` denotes the continuous time labels (i.e. epsilon to T), and we use `model_fn` for DPM-Solver.
247
+
248
+ ===============================================================
249
+
250
+ Args:
251
+ model: A diffusion model with the corresponding format described above.
252
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
253
+ model_type: A `str`. The parameterization type of the diffusion model.
254
+ "noise" or "x_start" or "v" or "score".
255
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
256
+ guidance_type: A `str`. The type of the guidance for sampling.
257
+ "uncond" or "classifier" or "classifier-free".
258
+ condition: A pytorch tensor. The condition for the guided sampling.
259
+ Only used for "classifier" or "classifier-free" guidance type.
260
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
261
+ Only used for "classifier-free" guidance type.
262
+ guidance_scale: A `float`. The scale for the guided sampling.
263
+ classifier_fn: A classifier function. Only used for the classifier guidance.
264
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
265
+ Returns:
266
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
267
+ """
268
+
269
+ def get_model_input_time(t_continuous):
270
+ """
271
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
272
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
273
+ For continuous-time DPMs, we just use `t_continuous`.
274
+ """
275
+ if noise_schedule.schedule == 'discrete':
276
+ return (t_continuous - 1. / noise_schedule.total_N) * noise_schedule.total_N
277
+ else:
278
+ return t_continuous
279
+
280
+ def noise_pred_fn(x, t_continuous, cond=None):
281
+ t_input = get_model_input_time(t_continuous)
282
+ if cond is None:
283
+ output = model(x, t_input, **model_kwargs)
284
+ else:
285
+ output = model(x, t_input, cond, **model_kwargs)
286
+ if model_type == "noise":
287
+ return output
288
+ elif model_type == "x_start":
289
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
290
+ return (x - expand_dims(alpha_t, x.dim()) * output) / expand_dims(sigma_t, x.dim())
291
+ elif model_type == "v":
292
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
293
+ return expand_dims(alpha_t, x.dim()) * output + expand_dims(sigma_t, x.dim()) * x
294
+ elif model_type == "score":
295
+ sigma_t = noise_schedule.marginal_std(t_continuous)
296
+ return -expand_dims(sigma_t, x.dim()) * output
297
+
298
+ def cond_grad_fn(x, t_input):
299
+ """
300
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
301
+ """
302
+ with torch.enable_grad():
303
+ x_in = x.detach().requires_grad_(True)
304
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
305
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
306
+
307
+ def model_fn(x, t_continuous):
308
+ """
309
+ The noise prediction model function that is used for DPM-Solver.
310
+ """
311
+ if guidance_type == "uncond":
312
+ return noise_pred_fn(x, t_continuous)
313
+ elif guidance_type == "classifier":
314
+ assert classifier_fn is not None
315
+ t_input = get_model_input_time(t_continuous)
316
+ cond_grad = cond_grad_fn(x, t_input)
317
+ sigma_t = noise_schedule.marginal_std(t_continuous)
318
+ noise = noise_pred_fn(x, t_continuous)
319
+ return noise - guidance_scale * expand_dims(sigma_t, x.dim()) * cond_grad
320
+ elif guidance_type == "classifier-free":
321
+ if guidance_scale == 1. or unconditional_condition is None:
322
+ return noise_pred_fn(x, t_continuous, cond=condition)
323
+ else:
324
+ x_in = torch.cat([x] * 2)
325
+ t_in = torch.cat([t_continuous] * 2)
326
+ c_in = torch.cat([unconditional_condition, condition])
327
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
328
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
329
+
330
+ assert model_type in ["noise", "x_start", "v", "score"]
331
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
332
+ return model_fn
333
+
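A hedged usage sketch of `model_wrapper` (the `toy_eps_model` below is a placeholder, not a model from this repository):

```
import torch

def toy_eps_model(x, t_input):
    # stand-in for a trained noise-prediction network
    return torch.zeros_like(x)

ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
model_fn = model_wrapper(toy_eps_model, ns, model_type="noise", guidance_type="uncond")
x = torch.randn(4, 3, 8, 8)
eps = model_fn(x, torch.full((4,), 0.5))   # predicted noise at continuous time t = 0.5
```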
334
+
335
+ class DPM_Solver:
336
+ def __init__(
337
+ self,
338
+ model_fn,
339
+ noise_schedule,
340
+ algorithm_type="dpmsolver++",
341
+ correcting_x0_fn=None,
342
+ correcting_xt_fn=None,
343
+ thresholding_max_val=1.,
344
+ dynamic_thresholding_ratio=0.995,
345
+ ):
346
+ """Construct a DPM-Solver.
347
+
348
+ We support both DPM-Solver (`algorithm_type="dpmsolver"`) and DPM-Solver++ (`algorithm_type="dpmsolver++"`).
349
+
350
+ We also support the "dynamic thresholding" method in Imagen[1]. For pixel-space diffusion models, you
351
+ can set both `algorithm_type="dpmsolver++"` and `correcting_x0_fn="dynamic_thresholding"` to use the
352
+ dynamic thresholding. The "dynamic thresholding" can greatly improve the sample quality for pixel-space
353
+ DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space
354
+ DPMs (such as stable-diffusion).
355
+
356
+ To support advanced algorithms in image-to-image applications, we also support corrector functions for
357
+ both x0 and xt.
358
+
359
+ Args:
360
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
361
+ ``
362
+ def model_fn(x, t_continuous):
363
+ return noise
364
+ ``
365
+ The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.
366
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
367
+ algorithm_type: A `str`. Either "dpmsolver" or "dpmsolver++".
368
+ correcting_x0_fn: A `str` or a function with the following format:
369
+ ```
370
+ def correcting_x0_fn(x0, t):
371
+ x0_new = ...
372
+ return x0_new
373
+ ```
374
+ This function is used to correct the outputs of the data prediction model at each sampling step, e.g.,
375
+ ```
376
+ x0_pred = data_pred_model(xt, t)
377
+ if correcting_x0_fn is not None:
378
+ x0_pred = correcting_x0_fn(x0_pred, t)
379
+ xt_1 = update(x0_pred, xt, t)
380
+ ```
381
+ If `correcting_x0_fn="dynamic_thresholding"`, we use the dynamic thresholding proposed in Imagen[1].
382
+ correcting_xt_fn: A function with the following format:
383
+ ```
384
+ def correcting_xt_fn(xt, t, step):
385
+ x_new = ...
386
+ return x_new
387
+ ```
388
+ This function is used to correct the intermediate samples xt at each sampling step, e.g.,
389
+ ```
390
+ xt = ...
391
+ xt = correcting_xt_fn(xt, t, step)
392
+ ```
393
+ thresholding_max_val: A `float`. The max value for thresholding.
394
+ Valid only when using `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`.
395
+ dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).
396
+ Valid only when using `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`.
397
+
398
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,
399
+ Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models
400
+ with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
401
+ """
402
+ self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))
403
+ self.noise_schedule = noise_schedule
404
+ assert algorithm_type in ["dpmsolver", "dpmsolver++"]
405
+ self.algorithm_type = algorithm_type
406
+ if correcting_x0_fn == "dynamic_thresholding":
407
+ self.correcting_x0_fn = self.dynamic_thresholding_fn
408
+ else:
409
+ self.correcting_x0_fn = correcting_x0_fn
410
+ self.correcting_xt_fn = correcting_xt_fn
411
+ self.dynamic_thresholding_ratio = dynamic_thresholding_ratio
412
+ self.thresholding_max_val = thresholding_max_val
413
+
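For instance, a pixel-space sampler with dynamic thresholding could be built like this (a sketch; `model_fn` and `ns` as in the `model_wrapper` example above):

```
dpm_solver = DPM_Solver(model_fn, ns, algorithm_type="dpmsolver++",
                        correcting_x0_fn="dynamic_thresholding")
```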
414
+ def dynamic_thresholding_fn(self, x0, t):
415
+ """
416
+ The dynamic thresholding method.
417
+ """
418
+ dims = x0.dim()
419
+ p = self.dynamic_thresholding_ratio
420
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
421
+ s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
422
+ x0 = torch.clamp(x0, -s, s) / s
423
+ return x0
424
+
425
+ def noise_prediction_fn(self, x, t):
426
+ """
427
+ Return the noise prediction model.
428
+ """
429
+ return self.model(x, t)
430
+
431
+ def data_prediction_fn(self, x, t):
432
+ """
433
+ Return the data prediction model (with corrector).
434
+ """
435
+ noise = self.noise_prediction_fn(x, t)
436
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
437
+ x0 = (x - sigma_t * noise) / alpha_t
438
+ if self.correcting_x0_fn is not None:
439
+ x0 = self.correcting_x0_fn(x0, t)
440
+ return x0
441
+
442
+ def model_fn(self, x, t):
443
+ """
444
+ Convert the model to the noise prediction model or the data prediction model.
445
+ """
446
+ if self.algorithm_type == "dpmsolver++":
447
+ return self.data_prediction_fn(x, t)
448
+ else:
449
+ return self.noise_prediction_fn(x, t)
450
+
451
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
452
+ """Compute the intermediate time steps for sampling.
453
+
454
+ Args:
455
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
456
+ - 'logSNR': uniform logSNR for the time steps.
457
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
458
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
459
+ t_T: A `float`. The starting time of the sampling (default is T).
460
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
461
+ N: A `int`. The total number of the spacing of the time steps.
462
+ device: A torch device.
463
+ Returns:
464
+ A pytorch tensor of the time steps, with the shape (N + 1,).
465
+ """
466
+ if skip_type == 'logSNR':
467
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
468
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
469
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
470
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
471
+ elif skip_type == 'time_uniform':
472
+ return torch.linspace(t_T, t_0, N + 1).to(device)
473
+ elif skip_type == 'time_quadratic':
474
+ t_order = 2
475
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
476
+ return t
477
+ else:
478
+ raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
479
+
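For illustration (a sketch, assuming a `dpm_solver` instance constructed as above), the three spacings over the same range look like:

```
ts_uniform = dpm_solver.get_time_steps('time_uniform', t_T=1., t_0=1e-3, N=10, device='cpu')
ts_quad = dpm_solver.get_time_steps('time_quadratic', t_T=1., t_0=1e-3, N=10, device='cpu')
ts_logsnr = dpm_solver.get_time_steps('logSNR', t_T=1., t_0=1e-3, N=10, device='cpu')
# each has shape (11,) and decreases from 1.0 to 1e-3; 'time_quadratic' (and,
# for typical schedules, 'logSNR') places more points near t_0 than 'time_uniform'
```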
480
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
481
+ """
482
+ Get the order of each step for sampling by the singlestep DPM-Solver.
483
+
484
+ We combine DPM-Solver-1, 2 and 3 to use all the function evaluations, a scheme named "DPM-Solver-fast".
485
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
486
+ - If order == 1:
487
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
488
+ - If order == 2:
489
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
490
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
491
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
492
+ - If order == 3:
493
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
494
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
495
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
496
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
497
+
498
+ ============================================
499
+ Args:
500
+ order: A `int`. The max order for the solver (2 or 3).
501
+ steps: A `int`. The total number of function evaluations (NFE).
502
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
503
+ - 'logSNR': uniform logSNR for the time steps.
504
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
505
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
506
+ t_T: A `float`. The starting time of the sampling (default is T).
507
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
508
+ device: A torch device.
509
+ Returns:
510
+ orders: A list of the solver order of each step.
511
+ """
512
+ if order == 3:
513
+ K = steps // 3 + 1
514
+ if steps % 3 == 0:
515
+ orders = [3,] * (K - 2) + [2, 1]
516
+ elif steps % 3 == 1:
517
+ orders = [3,] * (K - 1) + [1]
518
+ else:
519
+ orders = [3,] * (K - 1) + [2]
520
+ elif order == 2:
521
+ if steps % 2 == 0:
522
+ K = steps // 2
523
+ orders = [2,] * K
524
+ else:
525
+ K = steps // 2 + 1
526
+ orders = [2,] * (K - 1) + [1]
527
+ elif order == 1:
528
+ K = 1
529
+ orders = [1,] * steps
530
+ else:
531
+ raise ValueError("'order' must be '1' or '2' or '3'.")
532
+ if skip_type == 'logSNR':
533
+ # To reproduce the results in DPM-Solver paper
534
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
535
+ else:
536
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
537
+ return timesteps_outer, orders
538
+
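A worked example of the splitting rule above (pure bookkeeping, no model evaluations involved), for `order == 3`:

```
# steps = 6 -> K = 3, orders = [3, 2, 1]   (6 % 3 == 0)
# steps = 7 -> K = 3, orders = [3, 3, 1]   (7 % 3 == 1)
# steps = 8 -> K = 3, orders = [3, 3, 2]   (8 % 3 == 2)
# in each case sum(orders) == steps, so the NFE budget is used exactly
```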
539
+ def denoise_to_zero_fn(self, x, s):
540
+ """
541
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty with a first-order discretization.
542
+ """
543
+ return self.data_prediction_fn(x, s)
544
+
545
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
546
+ """
547
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
548
+
549
+ Args:
550
+ x: A pytorch tensor. The initial value at time `s`.
551
+ s: A pytorch tensor. The starting time, with the shape (1,).
552
+ t: A pytorch tensor. The ending time, with the shape (1,).
553
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
554
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
555
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
556
+ Returns:
557
+ x_t: A pytorch tensor. The approximated solution at time `t`.
558
+ """
559
+ ns = self.noise_schedule
560
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
561
+ h = lambda_t - lambda_s
562
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
563
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
564
+ alpha_t = torch.exp(log_alpha_t)
565
+
566
+ if self.algorithm_type == "dpmsolver++":
567
+ phi_1 = torch.expm1(-h)
568
+ if model_s is None:
569
+ model_s = self.model_fn(x, s)
570
+ x_t = (
571
+ sigma_t / sigma_s * x
572
+ - alpha_t * phi_1 * model_s
573
+ )
574
+ if return_intermediate:
575
+ return x_t, {'model_s': model_s}
576
+ else:
577
+ return x_t
578
+ else:
579
+ phi_1 = torch.expm1(h)
580
+ if model_s is None:
581
+ model_s = self.model_fn(x, s)
582
+ x_t = (
583
+ torch.exp(log_alpha_t - log_alpha_s) * x
584
+ - (sigma_t * phi_1) * model_s
585
+ )
586
+ if return_intermediate:
587
+ return x_t, {'model_s': model_s}
588
+ else:
589
+ return x_t
590
+
591
+ def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpmsolver'):
592
+ """
593
+ Singlestep solver DPM-Solver-2 from time `s` to time `t`.
594
+
595
+ Args:
596
+ x: A pytorch tensor. The initial value at time `s`.
597
+ s: A pytorch tensor. The starting time, with the shape (1,).
598
+ t: A pytorch tensor. The ending time, with the shape (1,).
599
+ r1: A `float`. The hyperparameter of the second-order solver.
600
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
601
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
602
+ return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
603
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
604
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
605
+ Returns:
606
+ x_t: A pytorch tensor. The approximated solution at time `t`.
607
+ """
608
+ if solver_type not in ['dpmsolver', 'taylor']:
609
+ raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
610
+ if r1 is None:
611
+ r1 = 0.5
612
+ ns = self.noise_schedule
613
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
614
+ h = lambda_t - lambda_s
615
+ lambda_s1 = lambda_s + r1 * h
616
+ s1 = ns.inverse_lambda(lambda_s1)
617
+ log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)
618
+ sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
619
+ alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
620
+
621
+ if self.algorithm_type == "dpmsolver++":
622
+ phi_11 = torch.expm1(-r1 * h)
623
+ phi_1 = torch.expm1(-h)
624
+
625
+ if model_s is None:
626
+ model_s = self.model_fn(x, s)
627
+ x_s1 = (
628
+ (sigma_s1 / sigma_s) * x
629
+ - (alpha_s1 * phi_11) * model_s
630
+ )
631
+ model_s1 = self.model_fn(x_s1, s1)
632
+ if solver_type == 'dpmsolver':
633
+ x_t = (
634
+ (sigma_t / sigma_s) * x
635
+ - (alpha_t * phi_1) * model_s
636
+ - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)
637
+ )
638
+ elif solver_type == 'taylor':
639
+ x_t = (
640
+ (sigma_t / sigma_s) * x
641
+ - (alpha_t * phi_1) * model_s
642
+ + (1. / r1) * (alpha_t * (phi_1 / h + 1.)) * (model_s1 - model_s)
643
+ )
644
+ else:
645
+ phi_11 = torch.expm1(r1 * h)
646
+ phi_1 = torch.expm1(h)
647
+
648
+ if model_s is None:
649
+ model_s = self.model_fn(x, s)
650
+ x_s1 = (
651
+ torch.exp(log_alpha_s1 - log_alpha_s) * x
652
+ - (sigma_s1 * phi_11) * model_s
653
+ )
654
+ model_s1 = self.model_fn(x_s1, s1)
655
+ if solver_type == 'dpmsolver':
656
+ x_t = (
657
+ torch.exp(log_alpha_t - log_alpha_s) * x
658
+ - (sigma_t * phi_1) * model_s
659
+ - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)
660
+ )
661
+ elif solver_type == 'taylor':
662
+ x_t = (
663
+ torch.exp(log_alpha_t - log_alpha_s) * x
664
+ - (sigma_t * phi_1) * model_s
665
+ - (1. / r1) * (sigma_t * (phi_1 / h - 1.)) * (model_s1 - model_s)
666
+ )
667
+ if return_intermediate:
668
+ return x_t, {'model_s': model_s, 'model_s1': model_s1}
669
+ else:
670
+ return x_t
671
+
672
+ def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpmsolver'):
673
+ """
674
+ Singlestep solver DPM-Solver-3 from time `s` to time `t`.
675
+
676
+ Args:
677
+ x: A pytorch tensor. The initial value at time `s`.
678
+ s: A pytorch tensor. The starting time, with the shape (1,).
679
+ t: A pytorch tensor. The ending time, with the shape (1,).
680
+ r1: A `float`. The hyperparameter of the third-order solver.
681
+ r2: A `float`. The hyperparameter of the third-order solver.
682
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
683
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
684
+ model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
685
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
686
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
687
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
688
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
689
+ Returns:
690
+ x_t: A pytorch tensor. The approximated solution at time `t`.
691
+ """
692
+ if solver_type not in ['dpmsolver', 'taylor']:
693
+ raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
694
+ if r1 is None:
695
+ r1 = 1. / 3.
696
+ if r2 is None:
697
+ r2 = 2. / 3.
698
+ ns = self.noise_schedule
699
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
700
+ h = lambda_t - lambda_s
701
+ lambda_s1 = lambda_s + r1 * h
702
+ lambda_s2 = lambda_s + r2 * h
703
+ s1 = ns.inverse_lambda(lambda_s1)
704
+ s2 = ns.inverse_lambda(lambda_s2)
705
+ log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
706
+ sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)
707
+ alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
708
+
709
+ if self.algorithm_type == "dpmsolver++":
710
+ phi_11 = torch.expm1(-r1 * h)
711
+ phi_12 = torch.expm1(-r2 * h)
712
+ phi_1 = torch.expm1(-h)
713
+ phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
714
+ phi_2 = phi_1 / h + 1.
715
+ phi_3 = phi_2 / h - 0.5
716
+
717
+ if model_s is None:
718
+ model_s = self.model_fn(x, s)
719
+ if model_s1 is None:
720
+ x_s1 = (
721
+ (sigma_s1 / sigma_s) * x
722
+ - (alpha_s1 * phi_11) * model_s
723
+ )
724
+ model_s1 = self.model_fn(x_s1, s1)
725
+ x_s2 = (
726
+ (sigma_s2 / sigma_s) * x
727
+ - (alpha_s2 * phi_12) * model_s
728
+ + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)
729
+ )
730
+ model_s2 = self.model_fn(x_s2, s2)
731
+ if solver_type == 'dpmsolver':
732
+ x_t = (
733
+ (sigma_t / sigma_s) * x
734
+ - (alpha_t * phi_1) * model_s
735
+ + (1. / r2) * (alpha_t * phi_2) * (model_s2 - model_s)
736
+ )
737
+ elif solver_type == 'taylor':
738
+ D1_0 = (1. / r1) * (model_s1 - model_s)
739
+ D1_1 = (1. / r2) * (model_s2 - model_s)
740
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
741
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
742
+ x_t = (
743
+ (sigma_t / sigma_s) * x
744
+ - (alpha_t * phi_1) * model_s
745
+ + (alpha_t * phi_2) * D1
746
+ - (alpha_t * phi_3) * D2
747
+ )
748
+ else:
749
+ phi_11 = torch.expm1(r1 * h)
750
+ phi_12 = torch.expm1(r2 * h)
751
+ phi_1 = torch.expm1(h)
752
+ phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
753
+ phi_2 = phi_1 / h - 1.
754
+ phi_3 = phi_2 / h - 0.5
755
+
756
+ if model_s is None:
757
+ model_s = self.model_fn(x, s)
758
+ if model_s1 is None:
759
+ x_s1 = (
760
+ (torch.exp(log_alpha_s1 - log_alpha_s)) * x
761
+ - (sigma_s1 * phi_11) * model_s
762
+ )
763
+ model_s1 = self.model_fn(x_s1, s1)
764
+ x_s2 = (
765
+ (torch.exp(log_alpha_s2 - log_alpha_s)) * x
766
+ - (sigma_s2 * phi_12) * model_s
767
+ - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)
768
+ )
769
+ model_s2 = self.model_fn(x_s2, s2)
770
+ if solver_type == 'dpmsolver':
771
+ x_t = (
772
+ (torch.exp(log_alpha_t - log_alpha_s)) * x
773
+ - (sigma_t * phi_1) * model_s
774
+ - (1. / r2) * (sigma_t * phi_2) * (model_s2 - model_s)
775
+ )
776
+ elif solver_type == 'taylor':
777
+ D1_0 = (1. / r1) * (model_s1 - model_s)
778
+ D1_1 = (1. / r2) * (model_s2 - model_s)
779
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
780
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
781
+ x_t = (
782
+ (torch.exp(log_alpha_t - log_alpha_s)) * x
783
+ - (sigma_t * phi_1) * model_s
784
+ - (sigma_t * phi_2) * D1
785
+ - (sigma_t * phi_3) * D2
786
+ )
787
+
788
+ if return_intermediate:
789
+ return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
790
+ else:
791
+ return x_t
792
+
793
+ def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpmsolver"):
794
+ """
795
+ Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
796
+
797
+ Args:
798
+ x: A pytorch tensor. The initial value at time `s`.
799
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
800
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)
801
+ t: A pytorch tensor. The ending time, with the shape (1,).
802
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
803
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
804
+ Returns:
805
+ x_t: A pytorch tensor. The approximated solution at time `t`.
806
+ """
807
+ if solver_type not in ['dpmsolver', 'taylor']:
808
+ raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
809
+ ns = self.noise_schedule
810
+ model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]
811
+ t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]
812
+ lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
813
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
814
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
815
+ alpha_t = torch.exp(log_alpha_t)
816
+
817
+ h_0 = lambda_prev_0 - lambda_prev_1
818
+ h = lambda_t - lambda_prev_0
819
+ r0 = h_0 / h
820
+ D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)
821
+ if self.algorithm_type == "dpmsolver++":
822
+ phi_1 = torch.expm1(-h)
823
+ if solver_type == 'dpmsolver':
824
+ x_t = (
825
+ (sigma_t / sigma_prev_0) * x
826
+ - (alpha_t * phi_1) * model_prev_0
827
+ - 0.5 * (alpha_t * phi_1) * D1_0
828
+ )
829
+ elif solver_type == 'taylor':
830
+ x_t = (
831
+ (sigma_t / sigma_prev_0) * x
832
+ - (alpha_t * phi_1) * model_prev_0
833
+ + (alpha_t * (phi_1 / h + 1.)) * D1_0
834
+ )
835
+ else:
836
+ phi_1 = torch.expm1(h)
837
+ if solver_type == 'dpmsolver':
838
+ x_t = (
839
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
840
+ - (sigma_t * phi_1) * model_prev_0
841
+ - 0.5 * (sigma_t * phi_1) * D1_0
842
+ )
843
+ elif solver_type == 'taylor':
844
+ x_t = (
845
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
846
+ - (sigma_t * phi_1) * model_prev_0
847
+ - (sigma_t * (phi_1 / h - 1.)) * D1_0
848
+ )
849
+ return x_t
850
+
851
+ def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):
852
+ """
853
+ Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
854
+
855
+ Args:
856
+ x: A pytorch tensor. The initial value at time `s`.
857
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
858
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)
859
+ t: A pytorch tensor. The ending time, with the shape (1,).
860
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
861
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
862
+ Returns:
863
+ x_t: A pytorch tensor. The approximated solution at time `t`.
864
+ """
865
+ ns = self.noise_schedule
866
+ model_prev_2, model_prev_1, model_prev_0 = model_prev_list
867
+ t_prev_2, t_prev_1, t_prev_0 = t_prev_list
868
+ lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
869
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
870
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
871
+ alpha_t = torch.exp(log_alpha_t)
872
+
873
+ h_1 = lambda_prev_1 - lambda_prev_2
874
+ h_0 = lambda_prev_0 - lambda_prev_1
875
+ h = lambda_t - lambda_prev_0
876
+ r0, r1 = h_0 / h, h_1 / h
877
+ D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)
878
+ D1_1 = (1. / r1) * (model_prev_1 - model_prev_2)
879
+ D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
880
+ D2 = (1. / (r0 + r1)) * (D1_0 - D1_1)
881
+ if self.algorithm_type == "dpmsolver++":
882
+ phi_1 = torch.expm1(-h)
883
+ phi_2 = phi_1 / h + 1.
884
+ phi_3 = phi_2 / h - 0.5
885
+ x_t = (
886
+ (sigma_t / sigma_prev_0) * x
887
+ - (alpha_t * phi_1) * model_prev_0
888
+ + (alpha_t * phi_2) * D1
889
+ - (alpha_t * phi_3) * D2
890
+ )
891
+ else:
892
+ phi_1 = torch.expm1(h)
893
+ phi_2 = phi_1 / h - 1.
894
+ phi_3 = phi_2 / h - 0.5
895
+ x_t = (
896
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
897
+ - (sigma_t * phi_1) * model_prev_0
898
+ - (sigma_t * phi_2) * D1
899
+ - (sigma_t * phi_3) * D2
900
+ )
901
+ return x_t
902
+
903
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None, r2=None):
904
+ """
905
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
906
+
907
+ Args:
908
+ x: A pytorch tensor. The initial value at time `s`.
909
+ s: A pytorch tensor. The starting time, with the shape (1,).
910
+ t: A pytorch tensor. The ending time, with the shape (1,).
911
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
912
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
913
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
914
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
915
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
916
+ r2: A `float`. The hyperparameter of the third-order solver.
917
+ Returns:
918
+ x_t: A pytorch tensor. The approximated solution at time `t`.
919
+ """
920
+ if order == 1:
921
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
922
+ elif order == 2:
923
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)
924
+ elif order == 3:
925
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)
926
+ else:
927
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
928
+
929
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):
930
+ """
931
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
932
+
933
+ Args:
934
+ x: A pytorch tensor. The initial value at time `s`.
935
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
936
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)
937
+ t: A pytorch tensor. The ending time, with the shape (1,).
938
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
939
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
940
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
941
+ Returns:
942
+ x_t: A pytorch tensor. The approximated solution at time `t`.
943
+ """
944
+ if order == 1:
945
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
946
+ elif order == 2:
947
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
948
+ elif order == 3:
949
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
950
+ else:
951
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
952
+
953
+ def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpmsolver'):
954
+ """
955
+ The adaptive step size solver based on singlestep DPM-Solver.
956
+
957
+ Args:
958
+ x: A pytorch tensor. The initial value at time `t_T`.
959
+ order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
960
+ t_T: A `float`. The starting time of the sampling (default is T).
961
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
962
+ h_init: A `float`. The initial step size (for logSNR).
963
+ atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
964
+ rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
965
+ theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
966
+ t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
967
+ current time and `t_0` is less than `t_err`. The default setting is 1e-5.
968
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
969
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
970
+ Returns:
971
+ x_0: A pytorch tensor. The approximated solution at time `t_0`.
972
+
973
+ [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
974
+ """
975
+ ns = self.noise_schedule
976
+ s = t_T * torch.ones((1,)).to(x)
977
+ lambda_s = ns.marginal_lambda(s)
978
+ lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
979
+ h = h_init * torch.ones_like(s).to(x)
980
+ x_prev = x
981
+ nfe = 0
982
+ if order == 2:
983
+ r1 = 0.5
984
+ def lower_update(x, s, t):
985
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=True)
986
+ def higher_update(x, s, t, **kwargs):
987
+ return self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)
988
+ elif order == 3:
989
+ r1, r2 = 1. / 3., 2. / 3.
990
+ def lower_update(x, s, t):
991
+ return self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)
992
+ def higher_update(x, s, t, **kwargs):
993
+ return self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)
994
+ else:
995
+ raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
996
+ while torch.abs((s - t_0)).mean() > t_err:
997
+ t = ns.inverse_lambda(lambda_s + h)
998
+ x_lower, lower_noise_kwargs = lower_update(x, s, t)
999
+ x_higher = higher_update(x, s, t, **lower_noise_kwargs)
1000
+ delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
1001
+ def norm_fn(v):
1002
+ return torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
1003
+ E = norm_fn((x_higher - x_lower) / delta).max()
1004
+ if torch.all(E <= 1.):
1005
+ x = x_higher
1006
+ s = t
1007
+ x_prev = x_lower
1008
+ lambda_s = ns.marginal_lambda(s)
1009
+ h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
1010
+ nfe += order
1011
+ print('adaptive solver nfe', nfe)
1012
+ return x
1013
+
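A hedged usage note: the adaptive solver is normally reached through `sample` with `method='adaptive'`, which ignores `steps` (assumes a `dpm_solver` instance and a noised input `x_T` as in the examples in this file):

```
x_0 = dpm_solver.sample(x_T, order=2, method='adaptive', atol=0.0078, rtol=0.05)
```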
1014
+ def add_noise(self, x, t, noise=None):
1015
+ """
1016
+ Compute the noised input xt = alpha_t * x + sigma_t * noise.
1017
+
1018
+ Args:
1019
+ x: A `torch.Tensor` with shape `(batch_size, *shape)`.
1020
+ t: A `torch.Tensor` with shape `(t_size,)`.
1021
+ Returns:
1022
+ xt with shape `(t_size, batch_size, *shape)`.
1023
+ """
1024
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
1025
+ if noise is None:
1026
+ noise = torch.randn((t.shape[0], *x.shape), device=x.device)
1027
+ x = x.reshape((-1, *x.shape))
1028
+ xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise
1029
+ if t.shape[0] == 1:
1030
+ return xt.squeeze(0)
1031
+ else:
1032
+ return xt
1033
+
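A small usage sketch of the formula above (assumes a `dpm_solver` instance as before):

```
x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([0.5])
xt = dpm_solver.add_noise(x0, t)   # alpha_t * x0 + sigma_t * noise
# since t_size == 1 here, the leading time dimension is squeezed: xt.shape == x0.shape
```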
1034
+ def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',
1035
+ method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',
1036
+ atol=0.0078, rtol=0.05, return_intermediate=False,
1037
+ ):
1038
+ """
1039
+ Invert the sample `x` from time `t_start` to `t_end` by DPM-Solver.
1040
+ For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.
1041
+ """
1042
+ t_0 = 1. / self.noise_schedule.total_N if t_start is None else t_start
1043
+ t_T = self.noise_schedule.T if t_end is None else t_end
1044
+ assert t_0 > 0 and t_T > 0, "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array"
1045
+ return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type,
1046
+ method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero, solver_type=solver_type,
1047
+ atol=atol, rtol=rtol, return_intermediate=return_intermediate)
1048
+
1049
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',
1050
+ method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',
1051
+ atol=0.0078, rtol=0.05, return_intermediate=False,
1052
+ ):
1053
+ """
1054
+ Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
1055
+
1056
+ =====================================================
1057
+
1058
+ We support the following algorithms for both noise prediction model and data prediction model:
1059
+ - 'singlestep':
1060
+ Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
1061
+ We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
1062
+ The total number of function evaluations (NFE) == `steps`.
1063
+ Given a fixed NFE == `steps`, the sampling procedure is:
1064
+ - If `order` == 1:
1065
+ - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
1066
+ - If `order` == 2:
1067
+ - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
1068
+ - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
1069
+ - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
1070
+ - If `order` == 3:
1071
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
1072
+ - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
1073
+ - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
1074
+ - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
1075
+ - 'multistep':
1076
+ Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
1077
+ We initialize the first `order` values by lower order multistep solvers.
1078
+ Given a fixed NFE == `steps`, the sampling procedure is:
1079
+ Denote K = steps.
1080
+ - If `order` == 1:
1081
+ - We use K steps of DPM-Solver-1 (i.e. DDIM).
1082
+ - If `order` == 2:
1083
+ - We first use 1 step of DPM-Solver-1, then use (K - 1) steps of multistep DPM-Solver-2.
1084
+ - If `order` == 3:
1085
+ - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
1086
+ - 'singlestep_fixed':
1087
+ Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
1088
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
1089
+ - 'adaptive':
1090
+ Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
1091
+ We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
1092
+ You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation cost
1093
+ (NFE) and the sample quality.
1094
+ - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
1095
+ - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
1096
+
1097
+ =====================================================
1098
+
1099
+ Some advice on choosing the algorithm:
1100
+ - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
1101
+ Use singlestep DPM-Solver or DPM-Solver++ ("DPM-Solver-fast" in the paper) with `order = 3`.
1102
+ e.g., DPM-Solver:
1103
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver")
1104
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
1105
+ skip_type='time_uniform', method='singlestep')
1106
+ e.g., DPM-Solver++:
1107
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
1108
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
1109
+ skip_type='time_uniform', method='singlestep')
1110
+ - For **guided sampling with large guidance scale** by DPMs:
1111
+ Use multistep DPM-Solver with `algorithm_type="dpmsolver++"` and `order = 2`.
1112
+ e.g.
1113
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
1114
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
1115
+ skip_type='time_uniform', method='multistep')
1116
+
1117
+ We support three types of `skip_type`:
1118
+ - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
1119
+ - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1120
+ - 'time_quadratic': quadratic time for the time steps.
1121
+
1122
+ =====================================================
1123
+ Args:
1124
+ x: A pytorch tensor. The initial value at time `t_start`
1125
+ e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1126
+ steps: A `int`. The total number of function evaluations (NFE).
1127
+ t_start: A `float`. The starting time of the sampling.
1128
+ If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1129
+ t_end: A `float`. The ending time of the sampling.
1130
+ If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1131
+ e.g. if total_N == 1000, we have `t_end` == 1e-3.
1132
+ For discrete-time DPMs:
1133
+ - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1134
+ For continuous-time DPMs:
1135
+ - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1136
+ order: A `int`. The order of DPM-Solver.
1137
+ skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1138
+ method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1139
+ denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1140
+ Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1141
+
1142
+ This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1143
+ score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when
1144
+ sampling from diffusion models by diffusion SDEs for low-resolution images
1145
+ (such as CIFAR-10). However, we observed that this trick does not matter for
1146
+ high-resolution images. As it needs an additional NFE, we do not recommend
1147
+ it for high-resolution images.
1148
+ lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1149
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that
1150
+ this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
1151
+ (especially for steps <= 10). So we recommend setting it to `True`.
1152
+ solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.
1153
+ atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1154
+ rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1155
+ return_intermediate: A `bool`. Whether to save the xt at each step.
1156
+ When set to `True`, the method returns a tuple (x0, intermediates); when set to `False`, it returns only x0.
1157
+ Returns:
1158
+ x_end: A pytorch tensor. The approximated solution at time `t_end`.
1159
+
1160
+ """
1161
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1162
+ t_T = self.noise_schedule.T if t_start is None else t_start
1163
+ assert t_0 > 0 and t_T > 0, "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array"
1164
+ if return_intermediate:
1165
+ assert method in ['multistep', 'singlestep', 'singlestep_fixed'], "Cannot use adaptive solver when saving intermediate values"
1166
+ if self.correcting_xt_fn is not None:
1167
+ assert method in ['multistep', 'singlestep', 'singlestep_fixed'], "Cannot use adaptive solver when correcting_xt_fn is not None"
1168
+ device = x.device
1169
+ intermediates = []
1170
+ with torch.no_grad():
1171
+ if method == 'adaptive':
1172
+ x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)
1173
+ elif method == 'multistep':
1174
+ assert steps >= order
1175
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1176
+ assert timesteps.shape[0] - 1 == steps
1177
+ # Init the initial values.
1178
+ step = 0
1179
+ t = timesteps[step]
1180
+ t_prev_list = [t]
1181
+ model_prev_list = [self.model_fn(x, t)]
1182
+ if self.correcting_xt_fn is not None:
1183
+ x = self.correcting_xt_fn(x, t, step)
1184
+ if return_intermediate:
1185
+ intermediates.append(x)
1186
+ # Init the first `order` values by lower order multistep DPM-Solver.
1187
+ for step in range(1, order):
1188
+ t = timesteps[step]
1189
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step, solver_type=solver_type)
1190
+ if self.correcting_xt_fn is not None:
1191
+ x = self.correcting_xt_fn(x, t, step)
1192
+ if return_intermediate:
1193
+ intermediates.append(x)
1194
+ t_prev_list.append(t)
1195
+ model_prev_list.append(self.model_fn(x, t))
1196
+ # Compute the remaining values by `order`-th order multistep DPM-Solver.
1197
+ for step in range(order, steps + 1):
1198
+ t = timesteps[step]
1199
+ # We only use lower order for steps < 10
1200
+ if lower_order_final and steps < 10:
1201
+ step_order = min(order, steps + 1 - step)
1202
+ else:
1203
+ step_order = order
1204
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order, solver_type=solver_type)
1205
+ if self.correcting_xt_fn is not None:
1206
+ x = self.correcting_xt_fn(x, t, step)
1207
+ if return_intermediate:
1208
+ intermediates.append(x)
1209
+ for i in range(order - 1):
1210
+ t_prev_list[i] = t_prev_list[i + 1]
1211
+ model_prev_list[i] = model_prev_list[i + 1]
1212
+ t_prev_list[-1] = t
1213
+ # We do not need to evaluate the final model value.
1214
+ if step < steps:
1215
+ model_prev_list[-1] = self.model_fn(x, t)
1216
+ elif method in ['singlestep', 'singlestep_fixed']:
1217
+ if method == 'singlestep':
1218
+ timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)
1219
+ elif method == 'singlestep_fixed':
1220
+ K = steps // order
1221
+ orders = [order,] * K
1222
+ timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1223
+ for step, order in enumerate(orders):
1224
+ s, t = timesteps_outer[step], timesteps_outer[step + 1]
1225
+ timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order, device=device)
1226
+ lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1227
+ h = lambda_inner[-1] - lambda_inner[0]
1228
+ r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1229
+ r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1230
+ x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)
1231
+ if self.correcting_xt_fn is not None:
1232
+ x = self.correcting_xt_fn(x, t, step)
1233
+ if return_intermediate:
1234
+ intermediates.append(x)
1235
+ else:
1236
+ raise ValueError("Got wrong method {}".format(method))
1237
+ if denoise_to_zero:
1238
+ t = torch.ones((1,)).to(device) * t_0
1239
+ x = self.denoise_to_zero_fn(x, t)
1240
+ if self.correcting_xt_fn is not None:
1241
+ x = self.correcting_xt_fn(x, t, step + 1)
1242
+ if return_intermediate:
1243
+ intermediates.append(x)
1244
+ if return_intermediate:
1245
+ return x, intermediates
1246
+ else:
1247
+ return x
1248
+
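Putting the pieces together, a minimal end-to-end sketch (the `toy_eps_model` is a placeholder for a trained noise-prediction network; everything else is defined in this file):

```
import torch

def toy_eps_model(x, t_input):
    return torch.zeros_like(x)   # hypothetical trained epsilon-predictor

ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
model_fn = model_wrapper(toy_eps_model, ns, model_type="noise", guidance_type="uncond")
dpm_solver = DPM_Solver(model_fn, ns, algorithm_type="dpmsolver++")
x_T = torch.randn(1, 3, 32, 32)
x_0 = dpm_solver.sample(x_T, steps=20, order=2,
                        skip_type='time_uniform', method='multistep')
```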
1249
+
1250
+
1251
+ #############################################################
1252
+ # other utility functions
1253
+ #############################################################
1254
+
1255
+ def interpolate_fn(x, xp, yp):
1256
+ """
1257
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
1258
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
1259
+ The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
1260
+
1261
+ Args:
1262
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1263
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1264
+ yp: PyTorch tensor with shape [C, K].
1265
+ Returns:
1266
+ The function values f(x), with shape [N, C].
1267
+ """
1268
+ N, K = x.shape[0], xp.shape[1]
1269
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1270
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1271
+ x_idx = torch.argmin(x_indices, dim=2)
1272
+ cand_start_idx = x_idx - 1
1273
+ start_idx = torch.where(
1274
+ torch.eq(x_idx, 0),
1275
+ torch.tensor(1, device=x.device),
1276
+ torch.where(
1277
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1278
+ ),
1279
+ )
1280
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1281
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1282
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1283
+ start_idx2 = torch.where(
1284
+ torch.eq(x_idx, 0),
1285
+ torch.tensor(0, device=x.device),
1286
+ torch.where(
1287
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1288
+ ),
1289
+ )
1290
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1291
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1292
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1293
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1294
+ return cand
1295
+
1296
+
1297
+ def expand_dims(v, dims):
1298
+ """
1299
+ Expand the tensor `v` to the dim `dims`.
1300
+
1301
+ Args:
1302
+ `v`: a PyTorch tensor with shape [N].
1303
+ `dim`: a `int`.
1304
+ Returns:
1305
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1306
+ """
1307
+ return v[(...,) + (None,)*(dims - 1)]
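For intuition, a minimal check of `interpolate_fn` (the values here are hypothetical, not from the repository): with keypoints xp = [0, 1] and yp = [0, 2], querying x = 0.5 should interpolate linearly to 1.0.

import torch

# Hypothetical keypoints: C = 1 channel, K = 2 keypoints.
xp = torch.tensor([[0.0, 1.0]])   # [C, K]
yp = torch.tensor([[0.0, 2.0]])   # [C, K]
x = torch.tensor([[0.5]])         # [N, C] with N = 1
y = interpolate_fn(x, xp, yp)     # tensor([[1.0]]) by linear interpolation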
diffusion/how to export onnx.md ADDED
@@ -0,0 +1,4 @@
+ - Open [onnx_export](onnx_export.py)
+ - Change `project_name = "dddsp"` to your own project name
+ - Change `model_path = f'{project_name}/model_500000.pt'` to your own model path
+ - Run it (see the sketch below)
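A minimal sketch of the edited `__main__` block after these steps, based on the export script's own entry point; `my_project` and the checkpoint step are placeholders:

project_name = "my_project"                     # placeholder: your project name
model_path = f"{project_name}/model_100000.pt"  # placeholder: your checkpoint

model, _ = load_model_vocoder(model_path)
model.OnnxExport(project_name, export_encoder=True, export_denoise=True,
                 export_pred=True, export_after=True)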
diffusion/infer_gt_mel.py ADDED
@@ -0,0 +1,74 @@
+import torch
+import torch.nn.functional as F
+
+from diffusion.unit2mel import load_model_vocoder
+
+
+class DiffGtMel:
+    def __init__(self, project_path=None, device=None):
+        self.project_path = project_path
+        if device is not None:
+            self.device = device
+        else:
+            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        self.model = None
+        self.vocoder = None
+        self.args = None
+
+    def flush_model(self, project_path, ddsp_config=None):
+        if (self.model is None) or (project_path != self.project_path):
+            model, vocoder, args = load_model_vocoder(project_path, device=self.device)
+            if self.check_args(ddsp_config, args):
+                self.model = model
+                self.vocoder = vocoder
+                self.args = args
+
+    def check_args(self, args1, args2):
+        if args1.data.block_size != args2.data.block_size:
+            raise ValueError("The block_size of the DDSP model and the DIFF model do not match")
+        if args1.data.sampling_rate != args2.data.sampling_rate:
+            raise ValueError("The sampling_rate of the DDSP model and the DIFF model do not match")
+        if args1.data.encoder != args2.data.encoder:
+            raise ValueError("The encoder of the DDSP model and the DIFF model do not match")
+        return True
+
+    def __call__(self, audio, f0, hubert, volume, acc=1, spk_id=1, k_step=0, method='pndm',
+                 spk_mix_dict=None, start_frame=0):
+        input_mel = self.vocoder.extract(audio, self.args.data.sampling_rate)
+        out_mel = self.model(
+            hubert,
+            f0,
+            volume,
+            spk_id=spk_id,
+            spk_mix_dict=spk_mix_dict,
+            gt_spec=input_mel,
+            infer=True,
+            infer_speedup=acc,
+            method=method,
+            k_step=k_step,
+            use_tqdm=False)
+        if start_frame > 0:
+            out_mel = out_mel[:, start_frame:, :]
+            f0 = f0[:, start_frame:, :]
+        output = self.vocoder.infer(out_mel, f0)
+        if start_frame > 0:
+            output = F.pad(output, (start_frame * self.vocoder.vocoder_hop_size, 0))
+        return output
+
+    def infer(self, audio, f0, hubert, volume, acc=1, spk_id=1, k_step=0, method='pndm', silence_front=0,
+              use_silence=False, spk_mix_dict=None):
+        start_frame = int(silence_front * self.vocoder.vocoder_sample_rate / self.vocoder.vocoder_hop_size)
+        if use_silence:
+            audio = audio[:, start_frame * self.vocoder.vocoder_hop_size:]
+            f0 = f0[:, start_frame:, :]
+            hubert = hubert[:, start_frame:, :]
+            volume = volume[:, start_frame:, :]
+            _start_frame = 0
+        else:
+            _start_frame = start_frame
+        audio = self.__call__(audio, f0, hubert, volume, acc=acc, spk_id=spk_id, k_step=k_step,
+                              method=method, spk_mix_dict=spk_mix_dict, start_frame=_start_frame)
+        if use_silence:
+            if start_frame > 0:
+                audio = F.pad(audio, (start_frame * self.vocoder.vocoder_hop_size, 0))
+        return audio
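A minimal usage sketch of `DiffGtMel`; the project path, the DDSP config object `ddsp_args`, and the tensor shapes are assumptions for illustration (`audio` is [B, n_samples]; `f0`, `volume`, and the encoder features `hubert` are framewise [B, n_frames, ·] tensors):

diff_model = DiffGtMel(device='cpu')
diff_model.flush_model('exp/diffusion-test', ddsp_config=ddsp_args)  # ddsp_args: your DDSP config
out = diff_model.infer(audio, f0, hubert, volume, acc=20, spk_id=1,
                       k_step=300, method='dpm-solver', use_silence=False)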
diffusion/logger/__init__.py ADDED
File without changes
diffusion/logger/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (134 Bytes). View file
 
diffusion/logger/__pycache__/saver.cpython-38.pyc ADDED
Binary file (4 kB). View file
 
diffusion/logger/__pycache__/utils.cpython-38.pyc ADDED
Binary file (3.8 kB). View file
 
diffusion/logger/saver.py ADDED
@@ -0,0 +1,145 @@
+'''
+author: wayn391@mastertones
+'''
+
+import datetime
+import os
+import time
+
+import matplotlib.pyplot as plt
+import torch
+import yaml
+from torch.utils.tensorboard import SummaryWriter
+
+
+class Saver(object):
+    def __init__(
+            self,
+            args,
+            initial_global_step=-1):
+
+        self.expdir = args.env.expdir
+        self.sample_rate = args.data.sampling_rate
+
+        # cold start
+        self.global_step = initial_global_step
+        self.init_time = time.time()
+        self.last_time = time.time()
+
+        # makedirs
+        os.makedirs(self.expdir, exist_ok=True)
+
+        # path
+        self.path_log_info = os.path.join(self.expdir, 'log_info.txt')
+
+        # ckpt
+        os.makedirs(self.expdir, exist_ok=True)
+
+        # writer
+        self.writer = SummaryWriter(os.path.join(self.expdir, 'logs'))
+
+        # save config
+        path_config = os.path.join(self.expdir, 'config.yaml')
+        with open(path_config, "w") as out_config:
+            yaml.dump(dict(args), out_config)
+
+
+    def log_info(self, msg):
+        '''log method'''
+        if isinstance(msg, dict):
+            msg_list = []
+            for k, v in msg.items():
+                tmp_str = ''
+                if isinstance(v, int):
+                    tmp_str = '{}: {:,}'.format(k, v)
+                else:
+                    tmp_str = '{}: {}'.format(k, v)
+
+                msg_list.append(tmp_str)
+            msg_str = '\n'.join(msg_list)
+        else:
+            msg_str = msg
+
+        # display
+        print(msg_str)
+
+        # save
+        with open(self.path_log_info, 'a') as fp:
+            fp.write(msg_str+'\n')
+
+    def log_value(self, dict):
+        for k, v in dict.items():
+            self.writer.add_scalar(k, v, self.global_step)
+
+    def log_spec(self, name, spec, spec_out, vmin=-14, vmax=3.5):
+        spec_cat = torch.cat([(spec_out - spec).abs() + vmin, spec, spec_out], -1)
+        spec = spec_cat[0]
+        if isinstance(spec, torch.Tensor):
+            spec = spec.cpu().numpy()
+        fig = plt.figure(figsize=(12, 9))
+        plt.pcolor(spec.T, vmin=vmin, vmax=vmax)
+        plt.tight_layout()
+        self.writer.add_figure(name, fig, self.global_step)
+
+    def log_audio(self, dict):
+        for k, v in dict.items():
+            self.writer.add_audio(k, v, global_step=self.global_step, sample_rate=self.sample_rate)
+
+    def get_interval_time(self, update=True):
+        cur_time = time.time()
+        time_interval = cur_time - self.last_time
+        if update:
+            self.last_time = cur_time
+        return time_interval
+
+    def get_total_time(self, to_str=True):
+        total_time = time.time() - self.init_time
+        if to_str:
+            total_time = str(datetime.timedelta(
+                seconds=total_time))[:-5]
+        return total_time
+
+    def save_model(
+            self,
+            model,
+            optimizer,
+            name='model',
+            postfix='',
+            to_json=False):
+        # path
+        if postfix:
+            postfix = '_' + postfix
+        path_pt = os.path.join(
+            self.expdir, name+postfix+'.pt')
+
+        # check
+        print(' [*] model checkpoint saved: {}'.format(path_pt))
+
+        # save
+        if optimizer is not None:
+            torch.save({
+                'global_step': self.global_step,
+                'model': model.state_dict(),
+                'optimizer': optimizer.state_dict()}, path_pt)
+        else:
+            torch.save({
+                'global_step': self.global_step,
+                'model': model.state_dict()}, path_pt)
+
+
+    def delete_model(self, name='model', postfix=''):
+        # path
+        if postfix:
+            postfix = '_' + postfix
+        path_pt = os.path.join(
+            self.expdir, name+postfix+'.pt')
+
+        # delete
+        if os.path.exists(path_pt):
+            os.remove(path_pt)
+            print(' [*] model checkpoint deleted: {}'.format(path_pt))
+
+    def global_step_increment(self):
+        self.global_step += 1
diffusion/logger/utils.py ADDED
@@ -0,0 +1,127 @@
+import json
+import os
+
+import torch
+import yaml
+
+
+def traverse_dir(
+        root_dir,
+        extensions,
+        amount=None,
+        str_include=None,
+        str_exclude=None,
+        is_pure=False,
+        is_sort=False,
+        is_ext=True):
+
+    file_list = []
+    cnt = 0
+    for root, _, files in os.walk(root_dir):
+        for file in files:
+            if any([file.endswith(f".{ext}") for ext in extensions]):
+                # path
+                mix_path = os.path.join(root, file)
+                pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path
+
+                # amount
+                if (amount is not None) and (cnt == amount):
+                    if is_sort:
+                        file_list.sort()
+                    return file_list
+
+                # check string
+                if (str_include is not None) and (str_include not in pure_path):
+                    continue
+                if (str_exclude is not None) and (str_exclude in pure_path):
+                    continue
+
+                if not is_ext:
+                    ext = pure_path.split('.')[-1]
+                    pure_path = pure_path[:-(len(ext)+1)]
+                file_list.append(pure_path)
+                cnt += 1
+    if is_sort:
+        file_list.sort()
+    return file_list
+
+
+
+class DotDict(dict):
+    def __getattr__(*args):
+        val = dict.get(*args)
+        return DotDict(val) if type(val) is dict else val
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+
+def get_network_paras_amount(model_dict):
+    info = dict()
+    for model_name, model in model_dict.items():
+        # all_params = sum(p.numel() for p in model.parameters())
+        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+        info[model_name] = trainable_params
+    return info
+
+
+def load_config(path_config):
+    with open(path_config, "r") as config:
+        args = yaml.safe_load(config)
+    args = DotDict(args)
+    # print(args)
+    return args
+
+def save_config(path_config, config):
+    config = dict(config)
+    with open(path_config, "w") as f:
+        yaml.dump(config, f)
+
+def to_json(path_params, path_json):
+    params = torch.load(path_params, map_location=torch.device('cpu'))
+    raw_state_dict = {}
+    for k, v in params.items():
+        val = v.flatten().numpy().tolist()
+        raw_state_dict[k] = val
+
+    with open(path_json, 'w') as outfile:
+        json.dump(raw_state_dict, outfile, indent="\t")
+
+
+def convert_tensor_to_numpy(tensor, is_squeeze=True):
+    if is_squeeze:
+        tensor = tensor.squeeze()
+    if tensor.requires_grad:
+        tensor = tensor.detach()
+    if tensor.is_cuda:
+        tensor = tensor.cpu()
+    return tensor.numpy()
+
+
+def load_model(
+        expdir,
+        model,
+        optimizer,
+        name='model',
+        postfix='',
+        device='cpu'):
+    if postfix == '':
+        postfix = '_' + postfix
+    path = os.path.join(expdir, name+postfix)
+    path_pt = traverse_dir(expdir, ['pt'], is_ext=False)
+    global_step = 0
+    if len(path_pt) > 0:
+        steps = [s[len(path):] for s in path_pt]
+        maxstep = max([int(s) if s.isdigit() else 0 for s in steps])
+        if maxstep >= 0:
+            path_pt = path+str(maxstep)+'.pt'
+        else:
+            path_pt = path+'best.pt'
+        print(' [*] restoring model from', path_pt)
+        ckpt = torch.load(path_pt, map_location=torch.device(device))
+        global_step = ckpt['global_step']
+        model.load_state_dict(ckpt['model'], strict=False)
+        if ckpt.get("optimizer") is not None:
+            optimizer.load_state_dict(ckpt['optimizer'])
+    return global_step, model, optimizer
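`DotDict` simply layers attribute access over the nested YAML dict, which is why the rest of the codebase can write `args.data.sampling_rate`. A sketch with a hypothetical config path:

args = load_config('exp/diffusion-test/config.yaml')  # hypothetical path
print(args.data.sampling_rate)    # same as args['data']['sampling_rate']
print(args.train.batch_size)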
diffusion/onnx_export.py ADDED
@@ -0,0 +1,235 @@
+import os
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import yaml
+from diffusion_onnx import GaussianDiffusion
+
+
+class DotDict(dict):
+    def __getattr__(*args):
+        val = dict.get(*args)
+        return DotDict(val) if type(val) is dict else val
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+
+def load_model_vocoder(
+        model_path,
+        device='cpu'):
+    config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
+    with open(config_file, "r") as config:
+        args = yaml.safe_load(config)
+    args = DotDict(args)
+
+    # load model
+    model = Unit2Mel(
+        args.data.encoder_out_channels,
+        args.model.n_spk,
+        args.model.use_pitch_aug,
+        128,
+        args.model.n_layers,
+        args.model.n_chans,
+        args.model.n_hidden,
+        args.model.timesteps,
+        args.model.k_step_max)
+
+    print(' [Loading] ' + model_path)
+    ckpt = torch.load(model_path, map_location=torch.device(device))
+    model.to(device)
+    model.load_state_dict(ckpt['model'])
+    model.eval()
+    return model, args
+
+
+class Unit2Mel(nn.Module):
+    def __init__(
+            self,
+            input_channel,
+            n_spk,
+            use_pitch_aug=False,
+            out_dims=128,
+            n_layers=20,
+            n_chans=384,
+            n_hidden=256,
+            timesteps=1000,
+            k_step_max=1000):
+        super().__init__()
+
+        self.unit_embed = nn.Linear(input_channel, n_hidden)
+        self.f0_embed = nn.Linear(1, n_hidden)
+        self.volume_embed = nn.Linear(1, n_hidden)
+        if use_pitch_aug:
+            self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
+        else:
+            self.aug_shift_embed = None
+        self.n_spk = n_spk
+        if n_spk is not None and n_spk > 1:
+            self.spk_embed = nn.Embedding(n_spk, n_hidden)
+
+        self.timesteps = timesteps if timesteps is not None else 1000
+        self.k_step_max = k_step_max if k_step_max is not None and k_step_max > 0 and k_step_max < self.timesteps else self.timesteps
+
+
+        # diffusion
+        self.decoder = GaussianDiffusion(out_dims, n_layers, n_chans, n_hidden, self.timesteps, self.k_step_max)
+        self.hidden_size = n_hidden
+        self.speaker_map = torch.zeros((self.n_spk, 1, 1, n_hidden))
+
+
+
+    def forward(self, units, mel2ph, f0, volume, g=None):
+
+        '''
+        input:
+            B x n_frames x n_unit
+        return:
+            dict of B x n_frames x feat
+        '''
+
+        decoder_inp = F.pad(units, [0, 0, 1, 0])
+        mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, units.shape[-1]])
+        units = torch.gather(decoder_inp, 1, mel2ph_)  # [B, T, H]
+
+        x = self.unit_embed(units) + self.f0_embed((1 + f0.unsqueeze(-1) / 700).log()) + self.volume_embed(volume.unsqueeze(-1))
+
+        if self.n_spk is not None and self.n_spk > 1:   # [N, S] * [S, B, 1, H]
+            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
+            g = g * self.speaker_map  # [N, S, B, 1, H]
+            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
+            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
+            x = x.transpose(1, 2) + g
+            return x
+        else:
+            return x.transpose(1, 2)
+
+
+    def init_spkembed(self, units, f0, volume, spk_id=None, spk_mix_dict=None, aug_shift=None,
+                      gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
+
+        '''
+        input:
+            B x n_frames x n_unit
+        return:
+            dict of B x n_frames x feat
+        '''
+        x = self.unit_embed(units) + self.f0_embed((1 + f0 / 700).log()) + self.volume_embed(volume)
+        if self.n_spk is not None and self.n_spk > 1:
+            if spk_mix_dict is not None:
+                spk_embed_mix = torch.zeros((1, 1, self.hidden_size))
+                for k, v in spk_mix_dict.items():
+                    spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
+                    spk_embeddd = self.spk_embed(spk_id_torch)
+                    self.speaker_map[k] = spk_embeddd
+                    spk_embed_mix = spk_embed_mix + v * spk_embeddd
+                x = x + spk_embed_mix
+            else:
+                x = x + self.spk_embed(spk_id - 1)
+        self.speaker_map = self.speaker_map.unsqueeze(0)
+        self.speaker_map = self.speaker_map.detach()
+        return x.transpose(1, 2)
+
+    def OnnxExport(self, project_name=None, init_noise=None, export_encoder=True, export_denoise=True, export_pred=True, export_after=True):
+        hubert_hidden_size = 768
+        n_frames = 100
+        hubert = torch.randn((1, n_frames, hubert_hidden_size))
+        mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
+        f0 = torch.randn((1, n_frames))
+        volume = torch.randn((1, n_frames))
+        spk_mix = []
+        spks = {}
+        if self.n_spk is not None and self.n_spk > 1:
+            for i in range(self.n_spk):
+                spk_mix.append(1.0/float(self.n_spk))
+                spks.update({i: 1.0/float(self.n_spk)})
+        spk_mix = torch.tensor(spk_mix)
+        spk_mix = spk_mix.repeat(n_frames, 1)
+        self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
+        self.forward(hubert, mel2ph, f0, volume, spk_mix)
+        if export_encoder:
+            torch.onnx.export(
+                self,
+                (hubert, mel2ph, f0, volume, spk_mix),
+                f"{project_name}_encoder.onnx",
+                input_names=["hubert", "mel2ph", "f0", "volume", "spk_mix"],
+                output_names=["mel_pred"],
+                dynamic_axes={
+                    "hubert": [1],
+                    "f0": [1],
+                    "volume": [1],
+                    "mel2ph": [1],
+                    "spk_mix": [0],
+                },
+                opset_version=16
+            )
+
+        self.decoder.OnnxExport(project_name, init_noise=init_noise, export_denoise=export_denoise, export_pred=export_pred, export_after=export_after)
+
+    def ExportOnnx(self, project_name=None):
+        hubert_hidden_size = 768
+        n_frames = 100
+        hubert = torch.randn((1, n_frames, hubert_hidden_size))
+        mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
+        f0 = torch.randn((1, n_frames))
+        volume = torch.randn((1, n_frames))
+        spk_mix = []
+        spks = {}
+        if self.n_spk is not None and self.n_spk > 1:
+            for i in range(self.n_spk):
+                spk_mix.append(1.0/float(self.n_spk))
+                spks.update({i: 1.0/float(self.n_spk)})
+        spk_mix = torch.tensor(spk_mix)
+        self.orgforward(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
+        self.forward(hubert, mel2ph, f0, volume, spk_mix)
+
+        torch.onnx.export(
+            self,
+            (hubert, mel2ph, f0, volume, spk_mix),
+            f"{project_name}_encoder.onnx",
+            input_names=["hubert", "mel2ph", "f0", "volume", "spk_mix"],
+            output_names=["mel_pred"],
+            dynamic_axes={
+                "hubert": [1],
+                "f0": [1],
+                "volume": [1],
+                "mel2ph": [1]
+            },
+            opset_version=16
+        )
+
+        condition = torch.randn(1, self.decoder.n_hidden, n_frames)
+        noise = torch.randn((1, 1, self.decoder.mel_bins, condition.shape[2]), dtype=torch.float32)
+        pndm_speedup = torch.LongTensor([100])
+        K_steps = torch.LongTensor([1000])
+        self.decoder = torch.jit.script(self.decoder)
+        self.decoder(condition, noise, pndm_speedup, K_steps)
+
+        torch.onnx.export(
+            self.decoder,
+            (condition, noise, pndm_speedup, K_steps),
+            f"{project_name}_diffusion.onnx",
+            input_names=["condition", "noise", "pndm_speedup", "K_steps"],
+            output_names=["mel"],
+            dynamic_axes={
+                "condition": [2],
+                "noise": [3],
+            },
+            opset_version=16
+        )
+
+
+if __name__ == "__main__":
+    project_name = "dddsp"
+    model_path = f'{project_name}/model_500000.pt'
+
+    model, _ = load_model_vocoder(model_path)
+
+    # Split diffusion export (requires MoeSS/MoeVoiceStudio, or writing your own PNDM/DPM sampling)
+    model.OnnxExport(project_name, export_encoder=True, export_denoise=True, export_pred=True, export_after=True)
+
+    # Merged diffusion export (the encoder and diffusion stay separate; feed the encoder output and the initial noise straight into the diffusion model)
+    # model.ExportOnnx(project_name)
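The exported encoder expects `spk_mix` as one row of per-speaker weights per frame, as built in `OnnxExport` above. A sketch with hypothetical weights for a two-speaker model:

import torch

n_frames, n_spk = 100, 2
spk_mix = torch.tensor([0.7, 0.3]).repeat(n_frames, 1)  # [n_frames, n_spk], each row sums to 1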
diffusion/solver.py ADDED
@@ -0,0 +1,200 @@
+import time
+
+import librosa
+import numpy as np
+import torch
+from torch import autocast
+from torch.cuda.amp import GradScaler
+
+from diffusion.logger import utils
+from diffusion.logger.saver import Saver
+
+
+def test(args, model, vocoder, loader_test, saver):
+    print(' [*] testing...')
+    model.eval()
+
+    # losses
+    test_loss = 0.
+
+    # initialization
+    num_batches = len(loader_test)
+    rtf_all = []
+
+    # run
+    with torch.no_grad():
+        for bidx, data in enumerate(loader_test):
+            fn = data['name'][0].split("/")[-1]
+            speaker = data['name'][0].split("/")[-2]
+            print('--------')
+            print('{}/{} - {}'.format(bidx, num_batches, fn))
+
+            # unpack data
+            for k in data.keys():
+                if not k.startswith('name'):
+                    data[k] = data[k].to(args.device)
+            print('>>', data['name'][0])
+
+            # forward
+            st_time = time.time()
+            mel = model(
+                data['units'],
+                data['f0'],
+                data['volume'],
+                data['spk_id'],
+                gt_spec=None if model.k_step_max == model.timesteps else data['mel'],
+                infer=True,
+                infer_speedup=args.infer.speedup,
+                method=args.infer.method,
+                k_step=model.k_step_max
+            )
+            signal = vocoder.infer(mel, data['f0'])
+            ed_time = time.time()
+
+            # RTF
+            run_time = ed_time - st_time
+            song_time = signal.shape[-1] / args.data.sampling_rate
+            rtf = run_time / song_time
+            print('RTF: {} | {} / {}'.format(rtf, run_time, song_time))
+            rtf_all.append(rtf)
+
+            # loss
+            for i in range(args.train.batch_size):
+                loss = model(
+                    data['units'],
+                    data['f0'],
+                    data['volume'],
+                    data['spk_id'],
+                    gt_spec=data['mel'],
+                    infer=False,
+                    k_step=model.k_step_max)
+                test_loss += loss.item()
+
+            # log mel
+            saver.log_spec(f"{speaker}_{fn}.wav", data['mel'], mel)
+
+            # log audio
+            path_audio = data['name_ext'][0]
+            audio, sr = librosa.load(path_audio, sr=args.data.sampling_rate)
+            if len(audio.shape) > 1:
+                audio = librosa.to_mono(audio)
+            audio = torch.from_numpy(audio).unsqueeze(0).to(signal)
+            saver.log_audio({f"{speaker}_{fn}_gt.wav": audio, f"{speaker}_{fn}_pred.wav": signal})
+    # report
+    test_loss /= args.train.batch_size
+    test_loss /= num_batches
+
+    # check
+    print(' [test_loss] test_loss:', test_loss)
+    print(' Real Time Factor', np.mean(rtf_all))
+    return test_loss
+
+
+def train(args, initial_global_step, model, optimizer, scheduler, vocoder, loader_train, loader_test):
+    # saver
+    saver = Saver(args, initial_global_step=initial_global_step)
+
+    # model size
+    params_count = utils.get_network_paras_amount({'model': model})
+    saver.log_info('--- model size ---')
+    saver.log_info(params_count)
+
+    # run
+    num_batches = len(loader_train)
+    model.train()
+    saver.log_info('======= start training =======')
+    scaler = GradScaler()
+    if args.train.amp_dtype == 'fp32':
+        dtype = torch.float32
+    elif args.train.amp_dtype == 'fp16':
+        dtype = torch.float16
+    elif args.train.amp_dtype == 'bf16':
+        dtype = torch.bfloat16
+    else:
+        raise ValueError(' [x] Unknown amp_dtype: ' + args.train.amp_dtype)
+    saver.log_info("epoch|batch_idx/num_batches|output_dir|batch/s|lr|time|step")
+    for epoch in range(args.train.epochs):
+        for batch_idx, data in enumerate(loader_train):
+            saver.global_step_increment()
+            optimizer.zero_grad()
+
+            # unpack data
+            for k in data.keys():
+                if not k.startswith('name'):
+                    data[k] = data[k].to(args.device)
+
+            # forward
+            if dtype == torch.float32:
+                loss = model(data['units'].float(), data['f0'], data['volume'], data['spk_id'],
+                             aug_shift=data['aug_shift'], gt_spec=data['mel'].float(), infer=False, k_step=model.k_step_max)
+            else:
+                with autocast(device_type=args.device, dtype=dtype):
+                    loss = model(data['units'], data['f0'], data['volume'], data['spk_id'],
+                                 aug_shift=data['aug_shift'], gt_spec=data['mel'], infer=False, k_step=model.k_step_max)
+
+            # handle nan loss
+            if torch.isnan(loss):
+                raise ValueError(' [x] nan loss ')
+            else:
+                # backpropagate
+                if dtype == torch.float32:
+                    loss.backward()
+                    optimizer.step()
+                else:
+                    scaler.scale(loss).backward()
+                    scaler.step(optimizer)
+                    scaler.update()
+                scheduler.step()
+
+            # log loss
+            if saver.global_step % args.train.interval_log == 0:
+                current_lr = optimizer.param_groups[0]['lr']
+                saver.log_info(
+                    'epoch: {} | {:3d}/{:3d} | {} | batch/s: {:.2f} | lr: {:.6} | loss: {:.3f} | time: {} | step: {}'.format(
+                        epoch,
+                        batch_idx,
+                        num_batches,
+                        args.env.expdir,
+                        args.train.interval_log/saver.get_interval_time(),
+                        current_lr,
+                        loss.item(),
+                        saver.get_total_time(),
+                        saver.global_step
+                    )
+                )
+
+                saver.log_value({
+                    'train/loss': loss.item()
+                })
+
+                saver.log_value({
+                    'train/lr': current_lr
+                })
+
+            # validation
+            if saver.global_step % args.train.interval_val == 0:
+                optimizer_save = optimizer if args.train.save_opt else None
+
+                # save latest
+                saver.save_model(model, optimizer_save, postfix=f'{saver.global_step}')
+                last_val_step = saver.global_step - args.train.interval_val
+                if last_val_step % args.train.interval_force_save != 0:
+                    saver.delete_model(postfix=f'{last_val_step}')
+
+                # run testing set
+                test_loss = test(args, model, vocoder, loader_test, saver)
+
+                # log loss
+                saver.log_info(
+                    ' --- <validation> --- \nloss: {:.3f}. '.format(
+                        test_loss,
+                    )
+                )
+
+                saver.log_value({
+                    'validation/loss': test_loss
+                })
+
+                model.train()
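The RTF printed by `test()` is synthesis time divided by the duration of the generated audio, so values below 1.0 mean faster than real time. With hypothetical numbers:

run_time = 2.5              # seconds spent in model + vocoder
song_time = 10.0            # seconds of audio produced
rtf = run_time / song_time  # 0.25, i.e. 4x faster than real time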
diffusion/uni_pc.py ADDED
@@ -0,0 +1,733 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+
5
+
6
+ class NoiseScheduleVP:
7
+ def __init__(
8
+ self,
9
+ schedule='discrete',
10
+ betas=None,
11
+ alphas_cumprod=None,
12
+ continuous_beta_0=0.1,
13
+ continuous_beta_1=20.,
14
+ dtype=torch.float32,
15
+ ):
16
+ """Create a wrapper class for the forward SDE (VP type).
17
+ ***
18
+ Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.
19
+ We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
20
+ ***
21
+ The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
22
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
23
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
24
+ log_alpha_t = self.marginal_log_mean_coeff(t)
25
+ sigma_t = self.marginal_std(t)
26
+ lambda_t = self.marginal_lambda(t)
27
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
28
+ t = self.inverse_lambda(lambda_t)
29
+ ===============================================================
30
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
31
+ 1. For discrete-time DPMs:
32
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
33
+ t_i = (i + 1) / N
34
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
35
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
36
+ Args:
37
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
38
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
39
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
40
+ **Important**: Please pay special attention for the args for `alphas_cumprod`:
41
+ The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
42
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
43
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
44
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
45
+ and
46
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
47
+ 2. For continuous-time DPMs:
48
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
49
+ schedule are the default settings in DDPM and improved-DDPM:
50
+ Args:
51
+ beta_min: A `float` number. The smallest beta for the linear schedule.
52
+ beta_max: A `float` number. The largest beta for the linear schedule.
53
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
54
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
55
+ T: A `float` number. The ending time of the forward process.
56
+ ===============================================================
57
+ Args:
58
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
59
+ 'linear' or 'cosine' for continuous-time DPMs.
60
+ Returns:
61
+ A wrapper object of the forward SDE (VP type).
62
+
63
+ ===============================================================
64
+ Example:
65
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
66
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
67
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
68
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
69
+ # For continuous-time DPMs (VPSDE), linear schedule:
70
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
71
+ """
72
+
73
+ if schedule not in ['discrete', 'linear', 'cosine']:
74
+ raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
75
+
76
+ self.schedule = schedule
77
+ if schedule == 'discrete':
78
+ if betas is not None:
79
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
80
+ else:
81
+ assert alphas_cumprod is not None
82
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
83
+ self.total_N = len(log_alphas)
84
+ self.T = 1.
85
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)
86
+ self.log_alpha_array = log_alphas.reshape((1, -1,)).to(dtype=dtype)
87
+ else:
88
+ self.total_N = 1000
89
+ self.beta_0 = continuous_beta_0
90
+ self.beta_1 = continuous_beta_1
91
+ self.cosine_s = 0.008
92
+ self.cosine_beta_max = 999.
93
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
94
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
95
+ self.schedule = schedule
96
+ if schedule == 'cosine':
97
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
98
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
99
+ self.T = 0.9946
100
+ else:
101
+ self.T = 1.
102
+
103
+ def marginal_log_mean_coeff(self, t):
104
+ """
105
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
106
+ """
107
+ if self.schedule == 'discrete':
108
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
109
+ elif self.schedule == 'linear':
110
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
111
+ elif self.schedule == 'cosine':
112
+ def log_alpha_fn(s):
113
+ return torch.log(torch.cos((s + self.cosine_s) / (1.0 + self.cosine_s) * math.pi / 2.0))
114
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
115
+ return log_alpha_t
116
+
117
+ def marginal_alpha(self, t):
118
+ """
119
+ Compute alpha_t of a given continuous-time label t in [0, T].
120
+ """
121
+ return torch.exp(self.marginal_log_mean_coeff(t))
122
+
123
+ def marginal_std(self, t):
124
+ """
125
+ Compute sigma_t of a given continuous-time label t in [0, T].
126
+ """
127
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
128
+
129
+ def marginal_lambda(self, t):
130
+ """
131
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
132
+ """
133
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
134
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
135
+ return log_mean_coeff - log_std
136
+
137
+ def inverse_lambda(self, lamb):
138
+ """
139
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
140
+ """
141
+ if self.schedule == 'linear':
142
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
143
+ Delta = self.beta_0**2 + tmp
144
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
145
+ elif self.schedule == 'discrete':
146
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
147
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
148
+ return t.reshape((-1,))
149
+ else:
150
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
151
+ def t_fn(log_alpha_t):
152
+ return torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2.0 * (1.0 + self.cosine_s) / math.pi - self.cosine_s
153
+ t = t_fn(log_alpha)
154
+ return t
155
+
156
+
157
+ def model_wrapper(
158
+ model,
159
+ noise_schedule,
160
+ model_type="noise",
161
+ model_kwargs={},
162
+ guidance_type="uncond",
163
+ condition=None,
164
+ unconditional_condition=None,
165
+ guidance_scale=1.,
166
+ classifier_fn=None,
167
+ classifier_kwargs={},
168
+ ):
169
+ """Create a wrapper function for the noise prediction model.
170
+ """
171
+
172
+ def get_model_input_time(t_continuous):
173
+ """
174
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
175
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
176
+ For continuous-time DPMs, we just use `t_continuous`.
177
+ """
178
+ if noise_schedule.schedule == 'discrete':
179
+ return (t_continuous - 1. / noise_schedule.total_N) * noise_schedule.total_N
180
+ else:
181
+ return t_continuous
182
+
183
+ def noise_pred_fn(x, t_continuous, cond=None):
184
+ t_input = get_model_input_time(t_continuous)
185
+ if cond is None:
186
+ output = model(x, t_input, **model_kwargs)
187
+ else:
188
+ output = model(x, t_input, cond, **model_kwargs)
189
+ if model_type == "noise":
190
+ return output
191
+ elif model_type == "x_start":
192
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
193
+ return (x - alpha_t * output) / sigma_t
194
+ elif model_type == "v":
195
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
196
+ return alpha_t * output + sigma_t * x
197
+ elif model_type == "score":
198
+ sigma_t = noise_schedule.marginal_std(t_continuous)
199
+ return -sigma_t * output
200
+
201
+ def cond_grad_fn(x, t_input):
202
+ """
203
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
204
+ """
205
+ with torch.enable_grad():
206
+ x_in = x.detach().requires_grad_(True)
207
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
208
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
209
+
210
+ def model_fn(x, t_continuous):
211
+ """
212
+ The noise predicition model function that is used for DPM-Solver.
213
+ """
214
+ if guidance_type == "uncond":
215
+ return noise_pred_fn(x, t_continuous)
216
+ elif guidance_type == "classifier":
217
+ assert classifier_fn is not None
218
+ t_input = get_model_input_time(t_continuous)
219
+ cond_grad = cond_grad_fn(x, t_input)
220
+ sigma_t = noise_schedule.marginal_std(t_continuous)
221
+ noise = noise_pred_fn(x, t_continuous)
222
+ return noise - guidance_scale * sigma_t * cond_grad
223
+ elif guidance_type == "classifier-free":
224
+ if guidance_scale == 1. or unconditional_condition is None:
225
+ return noise_pred_fn(x, t_continuous, cond=condition)
226
+ else:
227
+ x_in = torch.cat([x] * 2)
228
+ t_in = torch.cat([t_continuous] * 2)
229
+ c_in = torch.cat([unconditional_condition, condition])
230
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
231
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
232
+
233
+ assert model_type in ["noise", "x_start", "v"]
234
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
235
+ return model_fn
236
+
237
+
238
+ class UniPC:
239
+ def __init__(
240
+ self,
241
+ model_fn,
242
+ noise_schedule,
243
+ algorithm_type="data_prediction",
244
+ correcting_x0_fn=None,
245
+ correcting_xt_fn=None,
246
+ thresholding_max_val=1.,
247
+ dynamic_thresholding_ratio=0.995,
248
+ variant='bh1'
249
+ ):
250
+ """Construct a UniPC.
251
+
252
+ We support both data_prediction and noise_prediction.
253
+ """
254
+ self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))
255
+ self.noise_schedule = noise_schedule
256
+ assert algorithm_type in ["data_prediction", "noise_prediction"]
257
+
258
+ if correcting_x0_fn == "dynamic_thresholding":
259
+ self.correcting_x0_fn = self.dynamic_thresholding_fn
260
+ else:
261
+ self.correcting_x0_fn = correcting_x0_fn
262
+
263
+ self.correcting_xt_fn = correcting_xt_fn
264
+ self.dynamic_thresholding_ratio = dynamic_thresholding_ratio
265
+ self.thresholding_max_val = thresholding_max_val
266
+
267
+ self.variant = variant
268
+ self.predict_x0 = algorithm_type == "data_prediction"
269
+
270
+ def dynamic_thresholding_fn(self, x0, t=None):
271
+ """
272
+ The dynamic thresholding method.
273
+ """
274
+ dims = x0.dim()
275
+ p = self.dynamic_thresholding_ratio
276
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
277
+ s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
278
+ x0 = torch.clamp(x0, -s, s) / s
279
+ return x0
280
+
281
+ def noise_prediction_fn(self, x, t):
282
+ """
283
+ Return the noise prediction model.
284
+ """
285
+ return self.model(x, t)
286
+
287
+ def data_prediction_fn(self, x, t):
288
+ """
289
+ Return the data prediction model (with corrector).
290
+ """
291
+ noise = self.noise_prediction_fn(x, t)
292
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
293
+ x0 = (x - sigma_t * noise) / alpha_t
294
+ if self.correcting_x0_fn is not None:
295
+ x0 = self.correcting_x0_fn(x0)
296
+ return x0
297
+
298
+ def model_fn(self, x, t):
299
+ """
300
+ Convert the model to the noise prediction model or the data prediction model.
301
+ """
302
+ if self.predict_x0:
303
+ return self.data_prediction_fn(x, t)
304
+ else:
305
+ return self.noise_prediction_fn(x, t)
306
+
307
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
308
+ """Compute the intermediate time steps for sampling.
309
+ """
310
+ if skip_type == 'logSNR':
311
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
312
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
313
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
314
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
315
+ elif skip_type == 'time_uniform':
316
+ return torch.linspace(t_T, t_0, N + 1).to(device)
317
+ elif skip_type == 'time_quadratic':
318
+ t_order = 2
319
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
320
+ return t
321
+ else:
322
+ raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
323
+
324
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
325
+ """
326
+ Get the order of each step for sampling by the singlestep DPM-Solver.
327
+ """
328
+ if order == 3:
329
+ K = steps // 3 + 1
330
+ if steps % 3 == 0:
331
+ orders = [3,] * (K - 2) + [2, 1]
332
+ elif steps % 3 == 1:
333
+ orders = [3,] * (K - 1) + [1]
334
+ else:
335
+ orders = [3,] * (K - 1) + [2]
336
+ elif order == 2:
337
+ if steps % 2 == 0:
338
+ K = steps // 2
339
+ orders = [2,] * K
340
+ else:
341
+ K = steps // 2 + 1
342
+ orders = [2,] * (K - 1) + [1]
343
+ elif order == 1:
344
+ K = steps
345
+ orders = [1,] * steps
346
+ else:
347
+ raise ValueError("'order' must be '1' or '2' or '3'.")
348
+ if skip_type == 'logSNR':
349
+ # To reproduce the results in DPM-Solver paper
350
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
351
+ else:
352
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
353
+ return timesteps_outer, orders
354
+
355
+ def denoise_to_zero_fn(self, x, s):
356
+ """
357
+ Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
358
+ """
359
+ return self.data_prediction_fn(x, s)
360
+
361
+ def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs):
362
+ if len(t.shape) == 0:
363
+ t = t.view(-1)
364
+ if 'bh' in self.variant:
365
+ return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
366
+ else:
367
+ assert self.variant == 'vary_coeff'
368
+ return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
369
+
370
+ def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
371
+ #print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
372
+ ns = self.noise_schedule
373
+ assert order <= len(model_prev_list)
374
+
375
+ # first compute rks
376
+ t_prev_0 = t_prev_list[-1]
377
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
378
+ lambda_t = ns.marginal_lambda(t)
379
+ model_prev_0 = model_prev_list[-1]
380
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
381
+ log_alpha_t = ns.marginal_log_mean_coeff(t)
382
+ alpha_t = torch.exp(log_alpha_t)
383
+
384
+ h = lambda_t - lambda_prev_0
385
+
386
+ rks = []
387
+ D1s = []
388
+ for i in range(1, order):
389
+ t_prev_i = t_prev_list[-(i + 1)]
390
+ model_prev_i = model_prev_list[-(i + 1)]
391
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
392
+ rk = (lambda_prev_i - lambda_prev_0) / h
393
+ rks.append(rk)
394
+ D1s.append((model_prev_i - model_prev_0) / rk)
395
+
396
+ rks.append(1.)
397
+ rks = torch.tensor(rks, device=x.device)
398
+
399
+ K = len(rks)
400
+ # build C matrix
401
+ C = []
402
+
403
+ col = torch.ones_like(rks)
404
+ for k in range(1, K + 1):
405
+ C.append(col)
406
+ col = col * rks / (k + 1)
407
+ C = torch.stack(C, dim=1)
408
+
409
+ if len(D1s) > 0:
410
+ D1s = torch.stack(D1s, dim=1) # (B, K)
411
+ C_inv_p = torch.linalg.inv(C[:-1, :-1])
412
+ A_p = C_inv_p
413
+
414
+ if use_corrector:
415
+ #print('using corrector')
416
+ C_inv = torch.linalg.inv(C)
417
+ A_c = C_inv
418
+
419
+ hh = -h if self.predict_x0 else h
420
+ h_phi_1 = torch.expm1(hh)
421
+ h_phi_ks = []
422
+ factorial_k = 1
423
+ h_phi_k = h_phi_1
424
+ for k in range(1, K + 2):
425
+ h_phi_ks.append(h_phi_k)
426
+ h_phi_k = h_phi_k / hh - 1 / factorial_k
427
+ factorial_k *= (k + 1)
428
+
429
+ model_t = None
430
+ if self.predict_x0:
431
+ x_t_ = (
432
+ sigma_t / sigma_prev_0 * x
433
+ - alpha_t * h_phi_1 * model_prev_0
434
+ )
435
+ # now predictor
436
+ x_t = x_t_
437
+ if len(D1s) > 0:
438
+ # compute the residuals for predictor
439
+ for k in range(K - 1):
440
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
441
+ # now corrector
442
+ if use_corrector:
443
+ model_t = self.model_fn(x_t, t)
444
+ D1_t = (model_t - model_prev_0)
445
+ x_t = x_t_
446
+ k = 0
447
+ for k in range(K - 1):
448
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
449
+ x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
450
+ else:
451
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
452
+ x_t_ = (
453
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
454
+ - (sigma_t * h_phi_1) * model_prev_0
455
+ )
456
+ # now predictor
457
+ x_t = x_t_
458
+ if len(D1s) > 0:
459
+ # compute the residuals for predictor
460
+ for k in range(K - 1):
461
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
462
+ # now corrector
463
+ if use_corrector:
464
+ model_t = self.model_fn(x_t, t)
465
+ D1_t = (model_t - model_prev_0)
466
+ x_t = x_t_
467
+ k = 0
468
+ for k in range(K - 1):
469
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
470
+ x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
471
+ return x_t, model_t
472
+
473
+ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
474
+ #print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
475
+ ns = self.noise_schedule
476
+ assert order <= len(model_prev_list)
477
+
478
+ # first compute rks
479
+ t_prev_0 = t_prev_list[-1]
480
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
481
+ lambda_t = ns.marginal_lambda(t)
482
+ model_prev_0 = model_prev_list[-1]
483
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
484
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
485
+ alpha_t = torch.exp(log_alpha_t)
486
+
487
+ h = lambda_t - lambda_prev_0
488
+
489
+ rks = []
490
+ D1s = []
491
+ for i in range(1, order):
492
+ t_prev_i = t_prev_list[-(i + 1)]
493
+ model_prev_i = model_prev_list[-(i + 1)]
494
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
495
+ rk = (lambda_prev_i - lambda_prev_0) / h
496
+ rks.append(rk)
497
+ D1s.append((model_prev_i - model_prev_0) / rk)
498
+
499
+ rks.append(1.)
500
+ rks = torch.tensor(rks, device=x.device)
501
+
502
+ R = []
503
+ b = []
504
+
505
+ hh = -h if self.predict_x0 else h
506
+ h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
507
+ h_phi_k = h_phi_1 / hh - 1
508
+
509
+ factorial_i = 1
510
+
511
+ if self.variant == 'bh1':
512
+ B_h = hh
513
+ elif self.variant == 'bh2':
514
+ B_h = torch.expm1(hh)
515
+ else:
516
+ raise NotImplementedError()
517
+
518
+ for i in range(1, order + 1):
519
+ R.append(torch.pow(rks, i - 1))
520
+ b.append(h_phi_k * factorial_i / B_h)
521
+ factorial_i *= (i + 1)
522
+ h_phi_k = h_phi_k / hh - 1 / factorial_i
523
+
524
+ R = torch.stack(R)
525
+ b = torch.cat(b)
526
+
527
+ # now predictor
528
+ use_predictor = len(D1s) > 0 and x_t is None
529
+ if len(D1s) > 0:
530
+ D1s = torch.stack(D1s, dim=1) # (B, K)
531
+ if x_t is None:
532
+ # for order 2, we use a simplified version
533
+ if order == 2:
534
+ rhos_p = torch.tensor([0.5], device=b.device)
535
+ else:
536
+ rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
537
+ else:
538
+ D1s = None
539
+
540
+ if use_corrector:
541
+ #print('using corrector')
542
+ # for order 1, we use a simplified version
543
+ if order == 1:
544
+ rhos_c = torch.tensor([0.5], device=b.device)
545
+ else:
546
+ rhos_c = torch.linalg.solve(R, b)
547
+
548
+ model_t = None
549
+ if self.predict_x0:
550
+ x_t_ = (
551
+ sigma_t / sigma_prev_0 * x
552
+ - alpha_t * h_phi_1 * model_prev_0
553
+ )
554
+
555
+ if x_t is None:
556
+ if use_predictor:
557
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
558
+ else:
559
+ pred_res = 0
560
+ x_t = x_t_ - alpha_t * B_h * pred_res
561
+
562
+ if use_corrector:
563
+ model_t = self.model_fn(x_t, t)
564
+ if D1s is not None:
565
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
566
+ else:
567
+ corr_res = 0
568
+ D1_t = (model_t - model_prev_0)
569
+ x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t)
570
+ else:
571
+ x_t_ = (
572
+ torch.exp(log_alpha_t - log_alpha_prev_0) * x
573
+ - sigma_t * h_phi_1 * model_prev_0
574
+ )
575
+ if x_t is None:
576
+ if use_predictor:
577
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
578
+ else:
579
+ pred_res = 0
580
+ x_t = x_t_ - sigma_t * B_h * pred_res
581
+
582
+ if use_corrector:
583
+ model_t = self.model_fn(x_t, t)
584
+ if D1s is not None:
585
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
586
+ else:
587
+ corr_res = 0
588
+ D1_t = (model_t - model_prev_0)
589
+ x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t)
590
+ return x_t, model_t
591
+
592
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',
593
+ method='multistep', lower_order_final=True, denoise_to_zero=False, atol=0.0078, rtol=0.05, return_intermediate=False,
594
+ ):
595
+ """
596
+ Compute the sample at time `t_end` by UniPC, given the initial `x` at time `t_start`.
597
+ """
598
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
599
+ t_T = self.noise_schedule.T if t_start is None else t_start
600
+ assert t_0 > 0 and t_T > 0, "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array"
601
+ if return_intermediate:
602
+ assert method in ['multistep', 'singlestep', 'singlestep_fixed'], "Cannot use adaptive solver when saving intermediate values"
603
+ if self.correcting_xt_fn is not None:
604
+ assert method in ['multistep', 'singlestep', 'singlestep_fixed'], "Cannot use adaptive solver when correcting_xt_fn is not None"
605
+ device = x.device
606
+ intermediates = []
607
+ with torch.no_grad():
608
+ if method == 'multistep':
609
+ assert steps >= order
610
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
611
+ assert timesteps.shape[0] - 1 == steps
612
+ # Init the initial values.
613
+ step = 0
614
+ t = timesteps[step]
615
+ t_prev_list = [t]
616
+ model_prev_list = [self.model_fn(x, t)]
617
+ if self.correcting_xt_fn is not None:
618
+ x = self.correcting_xt_fn(x, t, step)
619
+ if return_intermediate:
620
+ intermediates.append(x)
621
+
622
+ # Init the first `order` values by lower order multistep UniPC.
623
+ for step in range(1, order):
624
+ t = timesteps[step]
625
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, t, step, use_corrector=True)
626
+ if model_x is None:
627
+ model_x = self.model_fn(x, t)
628
+ if self.correcting_xt_fn is not None:
629
+ x = self.correcting_xt_fn(x, t, step)
630
+ if return_intermediate:
631
+ intermediates.append(x)
632
+ t_prev_list.append(t)
633
+ model_prev_list.append(model_x)
634
+
635
+ # Compute the remaining values by `order`-th order multistep DPM-Solver.
636
+ for step in range(order, steps + 1):
637
+ t = timesteps[step]
638
+ if lower_order_final:
639
+ step_order = min(order, steps + 1 - step)
640
+ else:
641
+ step_order = order
642
+ if step == steps:
643
+ #print('do not run corrector at the last step')
644
+ use_corrector = False
645
+ else:
646
+ use_corrector = True
647
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, t, step_order, use_corrector=use_corrector)
648
+ if self.correcting_xt_fn is not None:
649
+ x = self.correcting_xt_fn(x, t, step)
650
+ if return_intermediate:
651
+ intermediates.append(x)
652
+ for i in range(order - 1):
653
+ t_prev_list[i] = t_prev_list[i + 1]
654
+ model_prev_list[i] = model_prev_list[i + 1]
655
+ t_prev_list[-1] = t
656
+ # We do not need to evaluate the final model value.
657
+ if step < steps:
658
+ if model_x is None:
659
+ model_x = self.model_fn(x, t)
660
+ model_prev_list[-1] = model_x
661
+ else:
662
+ raise ValueError("Got wrong method {}".format(method))
663
+
664
+ if denoise_to_zero:
665
+ t = torch.ones((1,)).to(device) * t_0
666
+ x = self.denoise_to_zero_fn(x, t)
667
+ if self.correcting_xt_fn is not None:
668
+ x = self.correcting_xt_fn(x, t, step + 1)
669
+ if return_intermediate:
670
+ intermediates.append(x)
671
+ if return_intermediate:
672
+ return x, intermediates
673
+ else:
674
+ return x
675
+
676
+
+ #############################################################
+ # other utility functions
+ #############################################################
+
+ def interpolate_fn(x, xp, yp):
+     """
+     A piecewise linear function y = f(x), using xp and yp as keypoints.
+     We implement f(x) in a differentiable way (i.e. applicable for autograd).
+     The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
+
+     Args:
+         x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
+         xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
+         yp: PyTorch tensor with shape [C, K].
+     Returns:
+         The function values f(x), with shape [N, C].
+     """
+     N, K = x.shape[0], xp.shape[1]
+     all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
+     sorted_all_x, x_indices = torch.sort(all_x, dim=2)
+     x_idx = torch.argmin(x_indices, dim=2)
+     cand_start_idx = x_idx - 1
+     start_idx = torch.where(
+         torch.eq(x_idx, 0),
+         torch.tensor(1, device=x.device),
+         torch.where(
+             torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
+         ),
+     )
+     end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
+     start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
+     end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
+     start_idx2 = torch.where(
+         torch.eq(x_idx, 0),
+         torch.tensor(0, device=x.device),
+         torch.where(
+             torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
+         ),
+     )
+     y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
+     start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
+     end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
+     cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
+     return cand
+
+
+ def expand_dims(v, dims):
+     """
+     Expand the tensor `v` to `dims` dimensions.
+
+     Args:
+         `v`: a PyTorch tensor with shape [N].
+         `dims`: an `int`.
+     Returns:
+         a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
+     """
+     return v[(...,) + (None,) * (dims - 1)]
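For reference, a quick sketch of the two helpers above (keypoint values are made up; shapes follow the docstrings):

import torch

xp = torch.tensor([[0.0, 1.0, 2.0]])      # [C=1, K=3] keypoint x-coordinates
yp = torch.tensor([[0.0, 10.0, 20.0]])    # [C=1, K=3] keypoint y-values
x = torch.tensor([[0.5], [1.5], [3.0]])   # [N=3, C=1] queries; the last one extrapolates
print(interpolate_fn(x, xp, yp))          # tensor([[ 5.], [15.], [30.]])

v = torch.tensor([1.0, 2.0])              # [N]
print(expand_dims(v, 4).shape)            # torch.Size([2, 1, 1, 1])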
diffusion/unit2mel.py ADDED
@@ -0,0 +1,167 @@
+ import os
+
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import yaml
+
+ from .diffusion import GaussianDiffusion
+ from .vocoder import Vocoder
+ from .wavenet import WaveNet
+
+
+ class DotDict(dict):
+     def __getattr__(*args):
+         val = dict.get(*args)
+         return DotDict(val) if type(val) is dict else val
+
+     __setattr__ = dict.__setitem__
+     __delattr__ = dict.__delitem__
+
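For reference, a quick sketch of the attribute-style access DotDict provides (the config keys here are illustrative). Because lookups go through dict.get, a missing key silently returns None rather than raising AttributeError:

cfg = DotDict({'model': {'n_spk': 2}, 'vocoder': {'type': 'nsf-hifigan'}})
print(cfg.model.n_spk)   # 2 -- nested dicts are re-wrapped as DotDict on access
print(cfg.missing)       # None -- absent keys are silent, not errors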
+
+ def load_model_vocoder(
+         model_path,
+         device='cpu',
+         config_path=None
+ ):
+     if config_path is None:
+         config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
+     else:
+         config_file = config_path
+
+     with open(config_file, "r") as config:
+         args = yaml.safe_load(config)
+     args = DotDict(args)
+
+     # load vocoder
+     vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=device)
+
+     # load model
+     model = Unit2Mel(
+         args.data.encoder_out_channels,
+         args.model.n_spk,
+         args.model.use_pitch_aug,
+         vocoder.dimension,
+         args.model.n_layers,
+         args.model.n_chans,
+         args.model.n_hidden,
+         args.model.timesteps,
+         args.model.k_step_max
+     )
+
+     print(' [Loading] ' + model_path)
+     ckpt = torch.load(model_path, map_location=torch.device(device))
+     model.to(device)
+     model.load_state_dict(ckpt['model'])
+     model.eval()
+     print(f'Loaded diffusion model, sampler is {args.infer.method}, speedup: {args.infer.speedup} ')
+     return model, vocoder, args
+
+
+ class Unit2Mel(nn.Module):
+     def __init__(
+             self,
+             input_channel,
+             n_spk,
+             use_pitch_aug=False,
+             out_dims=128,
+             n_layers=20,
+             n_chans=384,
+             n_hidden=256,
+             timesteps=1000,
+             k_step_max=1000
+     ):
+         super().__init__()
+         self.unit_embed = nn.Linear(input_channel, n_hidden)
+         self.f0_embed = nn.Linear(1, n_hidden)
+         self.volume_embed = nn.Linear(1, n_hidden)
+         if use_pitch_aug:
+             self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
+         else:
+             self.aug_shift_embed = None
+         self.n_spk = n_spk
+         if n_spk is not None and n_spk > 1:
+             self.spk_embed = nn.Embedding(n_spk, n_hidden)
+
+         self.timesteps = timesteps if timesteps is not None else 1000
+         self.k_step_max = k_step_max if k_step_max is not None and k_step_max > 0 and k_step_max < self.timesteps else self.timesteps
+
+         self.n_hidden = n_hidden
+         # diffusion
+         self.decoder = GaussianDiffusion(WaveNet(out_dims, n_layers, n_chans, n_hidden), timesteps=self.timesteps, k_step=self.k_step_max, out_dims=out_dims)
+         self.input_channel = input_channel
+
+     def init_spkembed(self, units, f0, volume, spk_id=None, spk_mix_dict=None, aug_shift=None,
+                       gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
+         '''
+         input:
+             B x n_frames x n_unit
+         return:
+             dict of B x n_frames x feat
+         '''
+         x = self.unit_embed(units) + self.f0_embed((1 + f0 / 700).log()) + self.volume_embed(volume)
+         if self.n_spk is not None and self.n_spk > 1:
+             if spk_mix_dict is not None:
+                 spk_embed_mix = torch.zeros((1, 1, self.n_hidden))  # fixed: was self.hidden_size, which is never defined
+                 for k, v in spk_mix_dict.items():
+                     spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
+                     spk_embeddd = self.spk_embed(spk_id_torch)
+                     self.speaker_map[k] = spk_embeddd
+                     spk_embed_mix = spk_embed_mix + v * spk_embeddd
+                 x = x + spk_embed_mix
+             else:
+                 x = x + self.spk_embed(spk_id - 1)
+         self.speaker_map = self.speaker_map.unsqueeze(0)
+         self.speaker_map = self.speaker_map.detach()
+         return x.transpose(1, 2)
+
+     def init_spkmix(self, n_spk):
+         self.speaker_map = torch.zeros((n_spk, 1, 1, self.n_hidden))
+         hubert_hidden_size = self.input_channel
+         n_frames = 10
+         hubert = torch.randn((1, n_frames, hubert_hidden_size))
+         f0 = torch.randn((1, n_frames))
+         volume = torch.randn((1, n_frames))
+         spks = {}
+         for i in range(n_spk):
+             spks.update({i: 1.0 / float(self.n_spk)})
+         self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
+
+     def forward(self, units, f0, volume, spk_id=None, spk_mix_dict=None, aug_shift=None,
+                 gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
+         '''
+         input:
+             B x n_frames x n_unit
+         return:
+             dict of B x n_frames x feat
+         '''
+
+         if not self.training and gt_spec is not None and k_step > self.k_step_max:
+             raise Exception("The shallow diffusion k_step is greater than the maximum diffusion k_step (k_step_max)!")
+
+         if not self.training and gt_spec is None and self.k_step_max != self.timesteps:
+             raise Exception("This model can only be used for shallow diffusion and cannot run inference on its own!")
+
+         x = self.unit_embed(units) + self.f0_embed((1 + f0 / 700).log()) + self.volume_embed(volume)
+         if self.n_spk is not None and self.n_spk > 1:
+             if spk_mix_dict is not None:
+                 for k, v in spk_mix_dict.items():
+                     spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
+                     x = x + v * self.spk_embed(spk_id_torch)
+             else:
+                 if spk_id.shape[1] > 1:
+                     g = spk_id.reshape((spk_id.shape[0], spk_id.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
+                     g = g * self.speaker_map  # [N, S, B, 1, H]
+                     g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
+                     g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
+                     x = x + g
+                 else:
+                     x = x + self.spk_embed(spk_id)
+         if self.aug_shift_embed is not None and aug_shift is not None:
+             x = x + self.aug_shift_embed(aug_shift / 5)
+         x = self.decoder(x, gt_spec=gt_spec, infer=infer, infer_speedup=infer_speedup, method=method, k_step=k_step, use_tqdm=use_tqdm)
+
+         return x
+
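For reference, a hedged end-to-end sketch of loading and running the model above. The checkpoint path, frame count, and f0/volume values are illustrative assumptions, and it presumes a full (non-shallow) model (k_step_max == timesteps), since gt_spec is None:

import torch

model, vocoder, args = load_model_vocoder('logs/44k/diffusion/model_100000.pt', device='cpu')  # hypothetical path
n_frames = 100
units = torch.randn(1, n_frames, model.input_channel)   # B x n_frames x n_unit
f0 = torch.full((1, n_frames, 1), 440.0)                # frame-level f0 in Hz
volume = torch.rand(1, n_frames, 1)
with torch.no_grad():
    mel = model(units, f0, volume, spk_id=torch.LongTensor([[0]]),
                gt_spec=None, infer=True, infer_speedup=args.infer.speedup,
                method=args.infer.method, k_step=model.k_step_max)
    audio = vocoder.infer(mel, f0)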
diffusion/vocoder.py ADDED
@@ -0,0 +1,95 @@
+ import torch
+ from torchaudio.transforms import Resample
+
+ from vdecoder.nsf_hifigan.models import load_config, load_model
+ from vdecoder.nsf_hifigan.nvSTFT import STFT
+
+
+ class Vocoder:
+     def __init__(self, vocoder_type, vocoder_ckpt, device=None):
+         if device is None:
+             device = 'cuda' if torch.cuda.is_available() else 'cpu'
+         self.device = device
+
+         if vocoder_type == 'nsf-hifigan':
+             self.vocoder = NsfHifiGAN(vocoder_ckpt, device=device)
+         elif vocoder_type == 'nsf-hifigan-log10':
+             self.vocoder = NsfHifiGANLog10(vocoder_ckpt, device=device)
+         else:
+             raise ValueError(f" [x] Unknown vocoder: {vocoder_type}")
+
+         self.resample_kernel = {}
+         self.vocoder_sample_rate = self.vocoder.sample_rate()
+         self.vocoder_hop_size = self.vocoder.hop_size()
+         self.dimension = self.vocoder.dimension()
+
+     def extract(self, audio, sample_rate, keyshift=0):
+         # resample
+         if sample_rate == self.vocoder_sample_rate:
+             audio_res = audio
+         else:
+             key_str = str(sample_rate)
+             if key_str not in self.resample_kernel:
+                 self.resample_kernel[key_str] = Resample(sample_rate, self.vocoder_sample_rate, lowpass_filter_width=128).to(self.device)
+             audio_res = self.resample_kernel[key_str](audio)
+
+         # extract
+         mel = self.vocoder.extract(audio_res, keyshift=keyshift)  # B, n_frames, bins
+         return mel
+
+     def infer(self, mel, f0):
+         f0 = f0[:, :mel.size(1), 0]  # B, n_frames
+         audio = self.vocoder(mel, f0)
+         return audio
+
+
+ class NsfHifiGAN(torch.nn.Module):
+     def __init__(self, model_path, device=None):
+         super().__init__()
+         if device is None:
+             device = 'cuda' if torch.cuda.is_available() else 'cpu'
+         self.device = device
+         self.model_path = model_path
+         self.model = None
+         self.h = load_config(model_path)
+         self.stft = STFT(
+             self.h.sampling_rate,
+             self.h.num_mels,
+             self.h.n_fft,
+             self.h.win_size,
+             self.h.hop_size,
+             self.h.fmin,
+             self.h.fmax)
+
+     def sample_rate(self):
+         return self.h.sampling_rate
+
+     def hop_size(self):
+         return self.h.hop_size
+
+     def dimension(self):
+         return self.h.num_mels
+
+     def extract(self, audio, keyshift=0):
+         mel = self.stft.get_mel(audio, keyshift=keyshift).transpose(1, 2)  # B, n_frames, bins
+         return mel
+
+     def forward(self, mel, f0):
+         if self.model is None:
+             print('| Load HifiGAN: ', self.model_path)
+             self.model, self.h = load_model(self.model_path, device=self.device)
+         with torch.no_grad():
+             c = mel.transpose(1, 2)
+             audio = self.model(c, f0)
+             return audio
+
+ class NsfHifiGANLog10(NsfHifiGAN):
+     def forward(self, mel, f0):
+         if self.model is None:
+             print('| Load HifiGAN: ', self.model_path)
+             self.model, self.h = load_model(self.model_path, device=self.device)
+         with torch.no_grad():
+             c = 0.434294 * mel.transpose(1, 2)
+             audio = self.model(c, f0)
+             return audio
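For reference, a minimal mel round-trip sketch for the wrapper above; the checkpoint path and the 44.1 kHz input rate are assumptions:

import torch

vocoder = Vocoder('nsf-hifigan', 'pretrain/nsf_hifigan/model', device='cpu')  # hypothetical checkpoint path
audio_in = torch.randn(1, 44100)               # one second of (random) audio at an assumed 44.1 kHz
mel = vocoder.extract(audio_in, 44100)         # B x n_frames x num_mels; resampled first if rates differ
f0 = torch.full((1, mel.size(1), 1), 220.0)    # frame-level f0 in Hz
audio_out = vocoder.infer(mel, f0)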
diffusion/wavenet.py ADDED
@@ -0,0 +1,108 @@
+ import math
+ from math import sqrt
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.nn import Mish
+
+
+ class Conv1d(torch.nn.Conv1d):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         nn.init.kaiming_normal_(self.weight)
+
+
+ class SinusoidalPosEmb(nn.Module):
+     def __init__(self, dim):
+         super().__init__()
+         self.dim = dim
+
+     def forward(self, x):
+         device = x.device
+         half_dim = self.dim // 2
+         emb = math.log(10000) / (half_dim - 1)
+         emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+         emb = x[:, None] * emb[None, :]
+         emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+         return emb
+
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, encoder_hidden, residual_channels, dilation):
+         super().__init__()
+         self.residual_channels = residual_channels
+         self.dilated_conv = nn.Conv1d(
+             residual_channels,
+             2 * residual_channels,
+             kernel_size=3,
+             padding=dilation,
+             dilation=dilation
+         )
+         self.diffusion_projection = nn.Linear(residual_channels, residual_channels)
+         self.conditioner_projection = nn.Conv1d(encoder_hidden, 2 * residual_channels, 1)
+         self.output_projection = nn.Conv1d(residual_channels, 2 * residual_channels, 1)
+
+     def forward(self, x, conditioner, diffusion_step):
+         diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
+         conditioner = self.conditioner_projection(conditioner)
+         y = x + diffusion_step
+
+         y = self.dilated_conv(y) + conditioner
+
+         # Using torch.split instead of torch.chunk to avoid using onnx::Slice
+         gate, filter = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)
+         y = torch.sigmoid(gate) * torch.tanh(filter)
+
+         y = self.output_projection(y)
+
+         # Using torch.split instead of torch.chunk to avoid using onnx::Slice
+         residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)
+         return (x + residual) / math.sqrt(2.0), skip
+
+
+ class WaveNet(nn.Module):
+     def __init__(self, in_dims=128, n_layers=20, n_chans=384, n_hidden=256):
+         super().__init__()
+         self.input_projection = Conv1d(in_dims, n_chans, 1)
+         self.diffusion_embedding = SinusoidalPosEmb(n_chans)
+         self.mlp = nn.Sequential(
+             nn.Linear(n_chans, n_chans * 4),
+             Mish(),
+             nn.Linear(n_chans * 4, n_chans)
+         )
+         self.residual_layers = nn.ModuleList([
+             ResidualBlock(
+                 encoder_hidden=n_hidden,
+                 residual_channels=n_chans,
+                 dilation=1
+             )
+             for i in range(n_layers)
+         ])
+         self.skip_projection = Conv1d(n_chans, n_chans, 1)
+         self.output_projection = Conv1d(n_chans, in_dims, 1)
+         nn.init.zeros_(self.output_projection.weight)
+
+     def forward(self, spec, diffusion_step, cond):
+         """
+         :param spec: [B, 1, M, T]
+         :param diffusion_step: [B,]
+         :param cond: [B, n_hidden, T]
+         :return: [B, 1, M, T]
+         """
+         x = spec.squeeze(1)
+         x = self.input_projection(x)  # [B, residual_channel, T]
+
+         x = F.relu(x)
+         diffusion_step = self.diffusion_embedding(diffusion_step)
+         diffusion_step = self.mlp(diffusion_step)
+         skip = []
+         for layer in self.residual_layers:
+             x, skip_connection = layer(x, cond, diffusion_step)
+             skip.append(skip_connection)
+
+         x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
+         x = self.skip_projection(x)
+         x = F.relu(x)
+         x = self.output_projection(x)  # [B, mel_bins, T]
+         return x[:, None, :, :]
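For reference, a shape check for the denoiser above, with sizes matching the constructor defaults and the forward() docstring:

import torch

net = WaveNet(in_dims=128, n_layers=20, n_chans=384, n_hidden=256)
spec = torch.randn(2, 1, 128, 100)    # [B, 1, M, T] noisy mel
step = torch.randint(0, 1000, (2,))   # one diffusion step index per batch item
cond = torch.randn(2, 256, 100)       # [B, n_hidden, T] conditioning
out = net(spec, step, cond)
print(out.shape)                      # torch.Size([2, 1, 128, 100])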
edgetts/tts.py ADDED
@@ -0,0 +1,47 @@
+ import asyncio
+ import random
+ import sys
+
+ import edge_tts
+ from edge_tts import VoicesManager
+ from langdetect import DetectorFactory, detect
+
+ DetectorFactory.seed = 0
+
+ TEXT = sys.argv[1]
+ LANG = detect(TEXT) if sys.argv[2] == "Auto" else sys.argv[2]
+ RATE = sys.argv[3]
+ VOLUME = sys.argv[4]
+ GENDER = sys.argv[5] if len(sys.argv) == 6 else None
+ OUTPUT_FILE = "tts.wav"
+
+ print("Running TTS...")
+ print(f"Text: {TEXT}, Language: {LANG}, Gender: {GENDER}, Rate: {RATE}, Volume: {VOLUME}")
+
+ async def _main() -> None:
+     voices = await VoicesManager.create()
+     if GENDER is not None:
+         # From "zh-cn" to "zh-CN" etc.
+         if LANG == "zh-cn" or LANG == "zh-tw":
+             LOCALE = LANG[:-2] + LANG[-2:].upper()
+             voice = voices.find(Gender=GENDER, Locale=LOCALE)
+         else:
+             voice = voices.find(Gender=GENDER, Language=LANG)
+         VOICE = random.choice(voice)["Name"]
+         print(f"Using random {LANG} voice: {VOICE}")
+     else:
+         VOICE = LANG
+
+     communicate = edge_tts.Communicate(text=TEXT, voice=VOICE, rate=RATE, volume=VOLUME)
+     await communicate.save(OUTPUT_FILE)
+
+ if __name__ == "__main__":
+     if sys.platform.startswith("win"):
+         asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+         asyncio.run(_main())
+     else:
+         loop = asyncio.get_event_loop_policy().get_event_loop()
+         try:
+             loop.run_until_complete(_main())
+         finally:
+             loop.close()
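For reference, the script is argv-driven: text, language ("Auto" to autodetect), rate, volume, and an optional gender. When the gender argument is omitted, the second argument is used directly as the voice name, so it must then be a full voice such as 'zh-CN-XiaoxiaoNeural' (see tts_voices.py). A sketched invocation with illustrative values:

import subprocess

subprocess.run(['python', 'edgetts/tts.py', '你好，世界', 'Auto', '+0%', '+0%', 'Female'], check=True)
# The synthesized speech is written to tts.wav in the working directory.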
edgetts/tts_voices.py ADDED
@@ -0,0 +1,306 @@
+ # List of Supported Voices for edge_TTS
+ SUPPORTED_VOICES = {
+     'zh-CN-XiaoxiaoNeural': 'zh-CN',
+     'zh-CN-XiaoyiNeural': 'zh-CN',
+     'zh-CN-YunjianNeural': 'zh-CN',
+     'zh-CN-YunxiNeural': 'zh-CN',
+     'zh-CN-YunxiaNeural': 'zh-CN',
+     'zh-CN-YunyangNeural': 'zh-CN',
+     'zh-HK-HiuGaaiNeural': 'zh-HK',
+     'zh-HK-HiuMaanNeural': 'zh-HK',
+     'zh-HK-WanLungNeural': 'zh-HK',
+     'zh-TW-HsiaoChenNeural': 'zh-TW',
+     'zh-TW-YunJheNeural': 'zh-TW',
+     'zh-TW-HsiaoYuNeural': 'zh-TW',
+     'af-ZA-AdriNeural': 'af-ZA',
+     'af-ZA-WillemNeural': 'af-ZA',
+     'am-ET-AmehaNeural': 'am-ET',
+     'am-ET-MekdesNeural': 'am-ET',
+     'ar-AE-FatimaNeural': 'ar-AE',
+     'ar-AE-HamdanNeural': 'ar-AE',
+     'ar-BH-AliNeural': 'ar-BH',
+     'ar-BH-LailaNeural': 'ar-BH',
+     'ar-DZ-AminaNeural': 'ar-DZ',
+     'ar-DZ-IsmaelNeural': 'ar-DZ',
+     'ar-EG-SalmaNeural': 'ar-EG',
+     'ar-EG-ShakirNeural': 'ar-EG',
+     'ar-IQ-BasselNeural': 'ar-IQ',
+     'ar-IQ-RanaNeural': 'ar-IQ',
+     'ar-JO-SanaNeural': 'ar-JO',
+     'ar-JO-TaimNeural': 'ar-JO',
+     'ar-KW-FahedNeural': 'ar-KW',
+     'ar-KW-NouraNeural': 'ar-KW',
+     'ar-LB-LaylaNeural': 'ar-LB',
+     'ar-LB-RamiNeural': 'ar-LB',
+     'ar-LY-ImanNeural': 'ar-LY',
+     'ar-LY-OmarNeural': 'ar-LY',
+     'ar-MA-JamalNeural': 'ar-MA',
+     'ar-MA-MounaNeural': 'ar-MA',
+     'ar-OM-AbdullahNeural': 'ar-OM',
+     'ar-OM-AyshaNeural': 'ar-OM',
+     'ar-QA-AmalNeural': 'ar-QA',
+     'ar-QA-MoazNeural': 'ar-QA',
+     'ar-SA-HamedNeural': 'ar-SA',
+     'ar-SA-ZariyahNeural': 'ar-SA',
+     'ar-SY-AmanyNeural': 'ar-SY',
+     'ar-SY-LaithNeural': 'ar-SY',
+     'ar-TN-HediNeural': 'ar-TN',
+     'ar-TN-ReemNeural': 'ar-TN',
+     'ar-YE-MaryamNeural': 'ar-YE',
+     'ar-YE-SalehNeural': 'ar-YE',
+     'az-AZ-BabekNeural': 'az-AZ',
+     'az-AZ-BanuNeural': 'az-AZ',
+     'bg-BG-BorislavNeural': 'bg-BG',
+     'bg-BG-KalinaNeural': 'bg-BG',
+     'bn-BD-NabanitaNeural': 'bn-BD',
+     'bn-BD-PradeepNeural': 'bn-BD',
+     'bn-IN-BashkarNeural': 'bn-IN',
+     'bn-IN-TanishaaNeural': 'bn-IN',
+     'bs-BA-GoranNeural': 'bs-BA',
+     'bs-BA-VesnaNeural': 'bs-BA',
+     'ca-ES-EnricNeural': 'ca-ES',
+     'ca-ES-JoanaNeural': 'ca-ES',
+     'cs-CZ-AntoninNeural': 'cs-CZ',
+     'cs-CZ-VlastaNeural': 'cs-CZ',
+     'cy-GB-AledNeural': 'cy-GB',
+     'cy-GB-NiaNeural': 'cy-GB',
+     'da-DK-ChristelNeural': 'da-DK',
+     'da-DK-JeppeNeural': 'da-DK',
+     'de-AT-IngridNeural': 'de-AT',
+     'de-AT-JonasNeural': 'de-AT',
+     'de-CH-JanNeural': 'de-CH',
+     'de-CH-LeniNeural': 'de-CH',
+     'de-DE-AmalaNeural': 'de-DE',
+     'de-DE-ConradNeural': 'de-DE',
+     'de-DE-KatjaNeural': 'de-DE',
+     'de-DE-KillianNeural': 'de-DE',
+     'el-GR-AthinaNeural': 'el-GR',
+     'el-GR-NestorasNeural': 'el-GR',
+     'en-AU-NatashaNeural': 'en-AU',
+     'en-AU-WilliamNeural': 'en-AU',
+     'en-CA-ClaraNeural': 'en-CA',
+     'en-CA-LiamNeural': 'en-CA',
+     'en-GB-LibbyNeural': 'en-GB',
+     'en-GB-MaisieNeural': 'en-GB',
+     'en-GB-RyanNeural': 'en-GB',
+     'en-GB-SoniaNeural': 'en-GB',
+     'en-GB-ThomasNeural': 'en-GB',
+     'en-HK-SamNeural': 'en-HK',
+     'en-HK-YanNeural': 'en-HK',
+     'en-IE-ConnorNeural': 'en-IE',
+     'en-IE-EmilyNeural': 'en-IE',
+     'en-IN-NeerjaNeural': 'en-IN',
+     'en-IN-PrabhatNeural': 'en-IN',
+     'en-KE-AsiliaNeural': 'en-KE',
+     'en-KE-ChilembaNeural': 'en-KE',
+     'en-NG-AbeoNeural': 'en-NG',
+     'en-NG-EzinneNeural': 'en-NG',
+     'en-NZ-MitchellNeural': 'en-NZ',
+     'en-NZ-MollyNeural': 'en-NZ',
+     'en-PH-JamesNeural': 'en-PH',
+     'en-PH-RosaNeural': 'en-PH',
+     'en-SG-LunaNeural': 'en-SG',
+     'en-SG-WayneNeural': 'en-SG',
+     'en-TZ-ElimuNeural': 'en-TZ',
+     'en-TZ-ImaniNeural': 'en-TZ',
+     'en-US-AnaNeural': 'en-US',
+     'en-US-AriaNeural': 'en-US',
+     'en-US-ChristopherNeural': 'en-US',
+     'en-US-EricNeural': 'en-US',
+     'en-US-GuyNeural': 'en-US',
+     'en-US-JennyNeural': 'en-US',
+     'en-US-MichelleNeural': 'en-US',
+     'en-ZA-LeahNeural': 'en-ZA',
+     'en-ZA-LukeNeural': 'en-ZA',
+     'es-AR-ElenaNeural': 'es-AR',
+     'es-AR-TomasNeural': 'es-AR',
+     'es-BO-MarceloNeural': 'es-BO',
+     'es-BO-SofiaNeural': 'es-BO',
+     'es-CL-CatalinaNeural': 'es-CL',
+     'es-CL-LorenzoNeural': 'es-CL',
+     'es-CO-GonzaloNeural': 'es-CO',
+     'es-CO-SalomeNeural': 'es-CO',
+     'es-CR-JuanNeural': 'es-CR',
+     'es-CR-MariaNeural': 'es-CR',
+     'es-CU-BelkysNeural': 'es-CU',
+     'es-CU-ManuelNeural': 'es-CU',
+     'es-DO-EmilioNeural': 'es-DO',
+     'es-DO-RamonaNeural': 'es-DO',
+     'es-EC-AndreaNeural': 'es-EC',
+     'es-EC-LuisNeural': 'es-EC',
+     'es-ES-AlvaroNeural': 'es-ES',
+     'es-ES-ElviraNeural': 'es-ES',
+     'es-ES-ManuelEsCUNeural': 'es-ES',
+     'es-GQ-JavierNeural': 'es-GQ',
+     'es-GQ-TeresaNeural': 'es-GQ',
+     'es-GT-AndresNeural': 'es-GT',
+     'es-GT-MartaNeural': 'es-GT',
+     'es-HN-CarlosNeural': 'es-HN',
+     'es-HN-KarlaNeural': 'es-HN',
+     'es-MX-DaliaNeural': 'es-MX',
+     'es-MX-JorgeNeural': 'es-MX',
+     'es-MX-LorenzoEsCLNeural': 'es-MX',
+     'es-NI-FedericoNeural': 'es-NI',
+     'es-NI-YolandaNeural': 'es-NI',
+     'es-PA-MargaritaNeural': 'es-PA',
+     'es-PA-RobertoNeural': 'es-PA',
+     'es-PE-AlexNeural': 'es-PE',
+     'es-PE-CamilaNeural': 'es-PE',
+     'es-PR-KarinaNeural': 'es-PR',
+     'es-PR-VictorNeural': 'es-PR',
+     'es-PY-MarioNeural': 'es-PY',
+     'es-PY-TaniaNeural': 'es-PY',
+     'es-SV-LorenaNeural': 'es-SV',
+     'es-SV-RodrigoNeural': 'es-SV',
+     'es-US-AlonsoNeural': 'es-US',
+     'es-US-PalomaNeural': 'es-US',
+     'es-UY-MateoNeural': 'es-UY',
+     'es-UY-ValentinaNeural': 'es-UY',
+     'es-VE-PaolaNeural': 'es-VE',
+     'es-VE-SebastianNeural': 'es-VE',
+     'et-EE-AnuNeural': 'et-EE',
+     'et-EE-KertNeural': 'et-EE',
+     'fa-IR-DilaraNeural': 'fa-IR',
+     'fa-IR-FaridNeural': 'fa-IR',
+     'fi-FI-HarriNeural': 'fi-FI',
+     'fi-FI-NooraNeural': 'fi-FI',
+     'fil-PH-AngeloNeural': 'fil-PH',
+     'fil-PH-BlessicaNeural': 'fil-PH',
+     'fr-BE-CharlineNeural': 'fr-BE',
+     'fr-BE-GerardNeural': 'fr-BE',
+     'fr-CA-AntoineNeural': 'fr-CA',
+     'fr-CA-JeanNeural': 'fr-CA',
+     'fr-CA-SylvieNeural': 'fr-CA',
+     'fr-CH-ArianeNeural': 'fr-CH',
+     'fr-CH-FabriceNeural': 'fr-CH',
+     'fr-FR-DeniseNeural': 'fr-FR',
+     'fr-FR-EloiseNeural': 'fr-FR',
+     'fr-FR-HenriNeural': 'fr-FR',
+     'ga-IE-ColmNeural': 'ga-IE',
+     'ga-IE-OrlaNeural': 'ga-IE',
+     'gl-ES-RoiNeural': 'gl-ES',
+     'gl-ES-SabelaNeural': 'gl-ES',
+     'gu-IN-DhwaniNeural': 'gu-IN',
+     'gu-IN-NiranjanNeural': 'gu-IN',
+     'he-IL-AvriNeural': 'he-IL',
+     'he-IL-HilaNeural': 'he-IL',
+     'hi-IN-MadhurNeural': 'hi-IN',
+     'hi-IN-SwaraNeural': 'hi-IN',
+     'hr-HR-GabrijelaNeural': 'hr-HR',
+     'hr-HR-SreckoNeural': 'hr-HR',
+     'hu-HU-NoemiNeural': 'hu-HU',
+     'hu-HU-TamasNeural': 'hu-HU',
+     'id-ID-ArdiNeural': 'id-ID',
+     'id-ID-GadisNeural': 'id-ID',
+     'is-IS-GudrunNeural': 'is-IS',
+     'is-IS-GunnarNeural': 'is-IS',
+     'it-IT-DiegoNeural': 'it-IT',
+     'it-IT-ElsaNeural': 'it-IT',
+     'it-IT-IsabellaNeural': 'it-IT',
+     'ja-JP-KeitaNeural': 'ja-JP',
+     'ja-JP-NanamiNeural': 'ja-JP',
+     'jv-ID-DimasNeural': 'jv-ID',
+     'jv-ID-SitiNeural': 'jv-ID',
+     'ka-GE-EkaNeural': 'ka-GE',
+     'ka-GE-GiorgiNeural': 'ka-GE',
+     'kk-KZ-AigulNeural': 'kk-KZ',
+     'kk-KZ-DauletNeural': 'kk-KZ',
+     'km-KH-PisethNeural': 'km-KH',
+     'km-KH-SreymomNeural': 'km-KH',
+     'kn-IN-GaganNeural': 'kn-IN',
+     'kn-IN-SapnaNeural': 'kn-IN',
+     'ko-KR-InJoonNeural': 'ko-KR',
+     'ko-KR-SunHiNeural': 'ko-KR',
+     'lo-LA-ChanthavongNeural': 'lo-LA',
+     'lo-LA-KeomanyNeural': 'lo-LA',
+     'lt-LT-LeonasNeural': 'lt-LT',
+     'lt-LT-OnaNeural': 'lt-LT',
+     'lv-LV-EveritaNeural': 'lv-LV',
+     'lv-LV-NilsNeural': 'lv-LV',
+     'mk-MK-AleksandarNeural': 'mk-MK',
+     'mk-MK-MarijaNeural': 'mk-MK',
+     'ml-IN-MidhunNeural': 'ml-IN',
+     'ml-IN-SobhanaNeural': 'ml-IN',
+     'mn-MN-BataaNeural': 'mn-MN',
+     'mn-MN-YesuiNeural': 'mn-MN',
+     'mr-IN-AarohiNeural': 'mr-IN',
+     'mr-IN-ManoharNeural': 'mr-IN',
+     'ms-MY-OsmanNeural': 'ms-MY',
+     'ms-MY-YasminNeural': 'ms-MY',
+     'mt-MT-GraceNeural': 'mt-MT',
+     'mt-MT-JosephNeural': 'mt-MT',
+     'my-MM-NilarNeural': 'my-MM',
+     'my-MM-ThihaNeural': 'my-MM',
+     'nb-NO-FinnNeural': 'nb-NO',
+     'nb-NO-PernilleNeural': 'nb-NO',
+     'ne-NP-HemkalaNeural': 'ne-NP',
+     'ne-NP-SagarNeural': 'ne-NP',
+     'nl-BE-ArnaudNeural': 'nl-BE',
+     'nl-BE-DenaNeural': 'nl-BE',
+     'nl-NL-ColetteNeural': 'nl-NL',
+     'nl-NL-FennaNeural': 'nl-NL',
+     'nl-NL-MaartenNeural': 'nl-NL',
+     'pl-PL-MarekNeural': 'pl-PL',
+     'pl-PL-ZofiaNeural': 'pl-PL',
+     'ps-AF-GulNawazNeural': 'ps-AF',
+     'ps-AF-LatifaNeural': 'ps-AF',
+     'pt-BR-AntonioNeural': 'pt-BR',
+     'pt-BR-FranciscaNeural': 'pt-BR',
+     'pt-PT-DuarteNeural': 'pt-PT',
+     'pt-PT-RaquelNeural': 'pt-PT',
+     'ro-RO-AlinaNeural': 'ro-RO',
+     'ro-RO-EmilNeural': 'ro-RO',
+     'ru-RU-DmitryNeural': 'ru-RU',
+     'ru-RU-SvetlanaNeural': 'ru-RU',
+     'si-LK-SameeraNeural': 'si-LK',
+     'si-LK-ThiliniNeural': 'si-LK',
+     'sk-SK-LukasNeural': 'sk-SK',
+     'sk-SK-ViktoriaNeural': 'sk-SK',
+     'sl-SI-PetraNeural': 'sl-SI',
+     'sl-SI-RokNeural': 'sl-SI',
+     'so-SO-MuuseNeural': 'so-SO',
+     'so-SO-UbaxNeural': 'so-SO',
+     'sq-AL-AnilaNeural': 'sq-AL',
+     'sq-AL-IlirNeural': 'sq-AL',
+     'sr-RS-NicholasNeural': 'sr-RS',
+     'sr-RS-SophieNeural': 'sr-RS',
+     'su-ID-JajangNeural': 'su-ID',
+     'su-ID-TutiNeural': 'su-ID',
+     'sv-SE-MattiasNeural': 'sv-SE',
+     'sv-SE-SofieNeural': 'sv-SE',
+     'sw-KE-RafikiNeural': 'sw-KE',
+     'sw-KE-ZuriNeural': 'sw-KE',
+     'sw-TZ-DaudiNeural': 'sw-TZ',
+     'sw-TZ-RehemaNeural': 'sw-TZ',
+     'ta-IN-PallaviNeural': 'ta-IN',
+     'ta-IN-ValluvarNeural': 'ta-IN',
+     'ta-LK-KumarNeural': 'ta-LK',
+     'ta-LK-SaranyaNeural': 'ta-LK',
+     'ta-MY-KaniNeural': 'ta-MY',
+     'ta-MY-SuryaNeural': 'ta-MY',
+     'ta-SG-AnbuNeural': 'ta-SG',
+     'ta-SG-VenbaNeural': 'ta-SG',
+     'te-IN-MohanNeural': 'te-IN',
+     'te-IN-ShrutiNeural': 'te-IN',
+     'th-TH-NiwatNeural': 'th-TH',
+     'th-TH-PremwadeeNeural': 'th-TH',
+     'tr-TR-AhmetNeural': 'tr-TR',
+     'tr-TR-EmelNeural': 'tr-TR',
+     'uk-UA-OstapNeural': 'uk-UA',
+     'uk-UA-PolinaNeural': 'uk-UA',
+     'ur-IN-GulNeural': 'ur-IN',
+     'ur-IN-SalmanNeural': 'ur-IN',
+     'ur-PK-AsadNeural': 'ur-PK',
+     'ur-PK-UzmaNeural': 'ur-PK',
+     'uz-UZ-MadinaNeural': 'uz-UZ',
+     'uz-UZ-SardorNeural': 'uz-UZ',
+     'vi-VN-HoaiMyNeural': 'vi-VN',
+     'vi-VN-NamMinhNeural': 'vi-VN',
+     'zu-ZA-ThandoNeural': 'zu-ZA',
+     'zu-ZA-ThembaNeural': 'zu-ZA',
+ }
+
+ SUPPORTED_LANGUAGES = [
+     "Auto",
+     *SUPPORTED_VOICES.keys()
+ ]
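For reference, a small sketch of querying the table above:

zh_voices = [name for name, locale in SUPPORTED_VOICES.items() if locale.startswith('zh-')]
print(len(zh_voices), zh_voices[0])   # 12 zh-CN-XiaoxiaoNeural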