hansyan committed on
Commit
94ef601
•
1 Parent(s): 3aaaaf0

Upload 7 files

src/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (147 Bytes).

src/__pycache__/scheduler_perflow.cpython-310.pyc ADDED
Binary file (12.7 kB).

src/__pycache__/scheduler_perflow.cpython-38.pyc ADDED
Binary file (12.1 kB).

src/__pycache__/utils_perflow.cpython-38.pyc ADDED
Binary file (2.64 kB).
src/pfode_solver.py ADDED
@@ -0,0 +1,283 @@
+ import os, math, random, argparse, logging
+ from pathlib import Path
+ from typing import Optional, Union, List, Callable
+ from collections import OrderedDict
+ from packaging import version
+ from tqdm.auto import tqdm
+ from omegaconf import OmegaConf
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ import torchvision
+
+
+ class PFODESolver():
+     def __init__(self, scheduler, t_initial=1, t_terminal=0) -> None:
+         self.t_initial = t_initial
+         self.t_terminal = t_terminal
+         self.scheduler = scheduler
+
+         train_step_terminal = 0
+         train_step_initial = train_step_terminal + self.scheduler.config.num_train_timesteps  # 0 + 1000
+
+         self.stepsize = (t_terminal - t_initial) / (train_step_terminal - train_step_initial)  # 1/1000
+
+     def get_timesteps(self, t_start, t_end, num_steps):
+         # (b,) -> (b,1)
+         t_start = t_start[:, None]
+         t_end = t_end[:, None]
+         assert t_start.dim() == 2
+
+         timepoints = torch.arange(0, num_steps, 1).expand(t_start.shape[0], num_steps).to(device=t_start.device)
+         interval = (t_end - t_start) / (torch.ones([1], device=t_start.device) * num_steps)
+         timepoints = t_start + interval * timepoints
+
+         # corresponding to the Stable Diffusion indexing system, from 999 (t_initial) down to 0
+         timesteps = (self.scheduler.config.num_train_timesteps - 1) + (timepoints - self.t_initial) / self.stepsize
+         return timesteps.round().long()
+         # return timesteps.floor().long()
+
+     def solve(self,
+               latents,
+               unet,
+               t_start,
+               t_end,
+               prompt_embeds,
+               negative_prompt_embeds,
+               guidance_scale=1.0,
+               num_steps=2,
+               num_windows=1,
+               ):
+         assert t_start.dim() == 1
+         assert guidance_scale >= 1 and torch.all(torch.gt(t_start, t_end))
+
+         do_classifier_free_guidance = guidance_scale > 1
+         bsz = latents.shape[0]
+
+         if do_classifier_free_guidance:
+             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+         timestep_cond = None
+         if unet.config.time_cond_proj_dim is not None:
+             # NOTE: only reached for UNets with `time_cond_proj_dim` set (e.g. LCM-style UNets);
+             # `get_guidance_scale_embedding` is not defined on this class and would need to be
+             # provided, e.g. as in diffusers' StableDiffusionPipeline.
+             guidance_scale_tensor = torch.tensor(guidance_scale - 1).repeat(bsz)
+             timestep_cond = self.get_guidance_scale_embedding(
+                 guidance_scale_tensor, embedding_dim=unet.config.time_cond_proj_dim
+             ).to(device=latents.device, dtype=latents.dtype)
+
+         timesteps = self.get_timesteps(t_start, t_end, num_steps).to(device=latents.device)
+         timestep_interval = self.scheduler.config.num_train_timesteps // (num_windows * num_steps)
+
+         # denoising loop
+         with torch.no_grad():
+             # for i in tqdm(range(num_steps)):
+             for i in range(num_steps):
+                 t = torch.cat([timesteps[:, i]] * 2) if do_classifier_free_guidance else timesteps[:, i]
+                 # expand the latents if we are doing classifier-free guidance
+                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                 # predict the noise residual
+                 noise_pred = unet(
+                     latent_model_input,
+                     t,
+                     encoder_hidden_states=prompt_embeds,
+                     timestep_cond=timestep_cond,
+                     return_dict=False,
+                 )[0]
+
+                 if do_classifier_free_guidance:
+                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                 # STEP: compute the previous noisy sample x_t -> x_{t-1}
+                 # latents = self.scheduler.step(noise_pred, timesteps[:, i].cpu(), latents, return_dict=False)[0]
+                 batch_timesteps = timesteps[:, i].cpu()
+                 prev_timestep = batch_timesteps - timestep_interval
+                 # prev_timestep = batch_timesteps - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
+
+                 alpha_prod_t = self.scheduler.alphas_cumprod[batch_timesteps]
+                 alpha_prod_t_prev = torch.zeros_like(alpha_prod_t)
+                 for ib in range(prev_timestep.shape[0]):
+                     alpha_prod_t_prev[ib] = self.scheduler.alphas_cumprod[prev_timestep[ib]] if prev_timestep[ib] >= 0 else self.scheduler.final_alpha_cumprod
+                 beta_prod_t = 1 - alpha_prod_t
+
+                 alpha_prod_t = alpha_prod_t.to(device=latents.device, dtype=latents.dtype)
+                 alpha_prod_t_prev = alpha_prod_t_prev.to(device=latents.device, dtype=latents.dtype)
+                 beta_prod_t = beta_prod_t.to(device=latents.device, dtype=latents.dtype)
+
+                 # compute the predicted original sample from the predicted noise, also called
+                 # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
+                 if self.scheduler.config.prediction_type == "epsilon":
+                     pred_original_sample = (latents - beta_prod_t[:, None, None, None] ** 0.5 * noise_pred) / alpha_prod_t[:, None, None, None] ** 0.5
+                     pred_epsilon = noise_pred
+                 # elif self.scheduler.config.prediction_type == "sample":
+                 #     pred_original_sample = noise_pred
+                 #     pred_epsilon = (latents - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
+                 elif self.scheduler.config.prediction_type == "v_prediction":
+                     pred_original_sample = (alpha_prod_t[:, None, None, None] ** 0.5) * latents - (beta_prod_t[:, None, None, None] ** 0.5) * noise_pred
+                     pred_epsilon = (alpha_prod_t[:, None, None, None] ** 0.5) * noise_pred + (beta_prod_t[:, None, None, None] ** 0.5) * latents
+                 else:
+                     raise ValueError(
+                         f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon` or"
+                         " `v_prediction`"
+                     )
+
+                 pred_sample_direction = (1 - alpha_prod_t_prev[:, None, None, None]) ** 0.5 * pred_epsilon
+                 latents = alpha_prod_t_prev[:, None, None, None] ** 0.5 * pred_original_sample + pred_sample_direction
+
+         return latents
+
+
+ class PFODESolverSDXL():
+     def __init__(self, scheduler, t_initial=1, t_terminal=0) -> None:
+         self.t_initial = t_initial
+         self.t_terminal = t_terminal
+         self.scheduler = scheduler
+
+         train_step_terminal = 0
+         train_step_initial = train_step_terminal + self.scheduler.config.num_train_timesteps  # 0 + 1000
+
+         self.stepsize = (t_terminal - t_initial) / (train_step_terminal - train_step_initial)  # 1/1000
+
+     def get_timesteps(self, t_start, t_end, num_steps):
+         # (b,) -> (b,1)
+         t_start = t_start[:, None]
+         t_end = t_end[:, None]
+         assert t_start.dim() == 2
+
+         timepoints = torch.arange(0, num_steps, 1).expand(t_start.shape[0], num_steps).to(device=t_start.device)
+         interval = (t_end - t_start) / (torch.ones([1], device=t_start.device) * num_steps)
+         timepoints = t_start + interval * timepoints
+
+         # corresponding to the Stable Diffusion indexing system, from 999 (t_initial) down to 0
+         timesteps = (self.scheduler.config.num_train_timesteps - 1) + (timepoints - self.t_initial) / self.stepsize
+         return timesteps.round().long()
+         # return timesteps.floor().long()
+
+     def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
+         # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
+         add_time_ids = list(original_size + crops_coords_top_left + target_size)
+         add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
+         return add_time_ids
+
+     def solve(self,
+               latents,
+               unet,
+               t_start,
+               t_end,
+               prompt_embeds,
+               pooled_prompt_embeds,
+               negative_prompt_embeds,
+               negative_pooled_prompt_embeds,
+               guidance_scale=1.0,
+               num_steps=10,
+               num_windows=4,
+               resolution=1024,
+               ):
+         assert t_start.dim() == 1
+         assert guidance_scale >= 1 and torch.all(torch.gt(t_start, t_end))
+         dtype = latents.dtype
+         device = latents.device
+         bsz = latents.shape[0]
+         do_classifier_free_guidance = guidance_scale > 1
+
+         add_text_embeds = pooled_prompt_embeds
+         add_time_ids = torch.cat(
+             [self._get_add_time_ids((resolution, resolution), (0, 0), (resolution, resolution), dtype) for _ in range(bsz)]
+         ).to(device)
+         negative_add_time_ids = add_time_ids
+
+         if do_classifier_free_guidance:
+             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+             add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
+             add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
+
+         timestep_cond = None
+         if unet.config.time_cond_proj_dim is not None:
+             # NOTE: only reached for UNets with `time_cond_proj_dim` set (e.g. LCM-style UNets);
+             # `get_guidance_scale_embedding` is not defined on this class and would need to be
+             # provided, e.g. as in diffusers' StableDiffusionXLPipeline.
+             guidance_scale_tensor = torch.tensor(guidance_scale - 1).repeat(bsz)
+             timestep_cond = self.get_guidance_scale_embedding(
+                 guidance_scale_tensor, embedding_dim=unet.config.time_cond_proj_dim
+             ).to(device=latents.device, dtype=latents.dtype)
+
+         timesteps = self.get_timesteps(t_start, t_end, num_steps).to(device=latents.device)
+         timestep_interval = self.scheduler.config.num_train_timesteps // (num_windows * num_steps)
+
+         # denoising loop
+         with torch.no_grad():
+             # for i in tqdm(range(num_steps)):
+             for i in range(num_steps):
+                 # expand the latents if we are doing classifier-free guidance
+                 t = torch.cat([timesteps[:, i]] * 2) if do_classifier_free_guidance else timesteps[:, i]
+                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                 # predict the noise residual
+                 added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+                 noise_pred = unet(
+                     latent_model_input,
+                     t,
+                     encoder_hidden_states=prompt_embeds,
+                     timestep_cond=timestep_cond,
+                     added_cond_kwargs=added_cond_kwargs,
+                     return_dict=False,
+                 )[0]
+
+                 if do_classifier_free_guidance:
+                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                 # STEP: compute the previous noisy sample x_t -> x_{t-1}
+                 # latents = self.scheduler.step(noise_pred, timesteps[:, i].cpu(), latents, return_dict=False)[0]
+                 batch_timesteps = timesteps[:, i].cpu()
+                 prev_timestep = batch_timesteps - timestep_interval
+                 # prev_timestep = batch_timesteps - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
+
+                 alpha_prod_t = self.scheduler.alphas_cumprod[batch_timesteps]
+                 alpha_prod_t_prev = torch.zeros_like(alpha_prod_t)
+                 for ib in range(prev_timestep.shape[0]):
+                     alpha_prod_t_prev[ib] = self.scheduler.alphas_cumprod[prev_timestep[ib]] if prev_timestep[ib] >= 0 else self.scheduler.final_alpha_cumprod
+                 beta_prod_t = 1 - alpha_prod_t
+
+                 alpha_prod_t = alpha_prod_t.to(device=latents.device, dtype=latents.dtype)
+                 alpha_prod_t_prev = alpha_prod_t_prev.to(device=latents.device, dtype=latents.dtype)
+                 beta_prod_t = beta_prod_t.to(device=latents.device, dtype=latents.dtype)
+
+                 # compute the predicted original sample from the predicted noise, also called
+                 # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
+                 if self.scheduler.config.prediction_type == "epsilon":
+                     pred_original_sample = (latents - beta_prod_t[:, None, None, None] ** 0.5 * noise_pred) / alpha_prod_t[:, None, None, None] ** 0.5
+                     pred_epsilon = noise_pred
+                 # elif self.scheduler.config.prediction_type == "sample":
+                 #     pred_original_sample = noise_pred
+                 #     pred_epsilon = (latents - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
+                 # elif self.scheduler.config.prediction_type == "v_prediction":
+                 #     pred_original_sample = (alpha_prod_t**0.5) * latents - (beta_prod_t**0.5) * noise_pred
+                 #     pred_epsilon = (alpha_prod_t**0.5) * noise_pred + (beta_prod_t**0.5) * latents
+                 else:
+                     raise ValueError(
+                         f"prediction_type given as {self.scheduler.config.prediction_type} must be `epsilon` for this solver"
+                     )
+
+                 pred_sample_direction = (1 - alpha_prod_t_prev[:, None, None, None]) ** 0.5 * pred_epsilon
+                 latents = alpha_prod_t_prev[:, None, None, None] ** 0.5 * pred_original_sample + pred_sample_direction
+
+         return latents
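
For orientation, here is a minimal usage sketch of PFODESolver (not part of the commit). It assumes a recent diffusers release where StableDiffusionPipeline.encode_prompt is available, builds a DDIMScheduler from the runwayml/stable-diffusion-v1-5 config, and the prompt, step count, and window boundaries are purely illustrative:

import torch
from diffusers import DDIMScheduler, StableDiffusionPipeline
from src.pfode_solver import PFODESolver

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
).to("cuda")

# the solver only needs the scheduler's config and alphas_cumprod, not the full pipeline
solver = PFODESolver(scheduler=DDIMScheduler.from_config(pipe.scheduler.config))

prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    "a photo of a cat", device="cuda", num_images_per_prompt=1, do_classifier_free_guidance=True
)
latents = torch.randn(1, 4, 64, 64, device="cuda", dtype=torch.float16)

# integrate the probability-flow ODE over the first of four windows, [1.0, 0.75], with 8 sub-steps
t_start = torch.ones(1, device="cuda")
t_end = torch.full((1,), 0.75, device="cuda")
latents = solver.solve(
    latents, pipe.unet, t_start, t_end,
    prompt_embeds, negative_prompt_embeds,
    guidance_scale=7.5, num_steps=8, num_windows=4,
)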
src/scheduler_perflow.py ADDED
@@ -0,0 +1,377 @@
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
+ # and https://github.com/hojonathanho/diffusion
+
+ import math
+ from dataclasses import dataclass
+ from typing import List, Optional, Tuple, Union
+ import numpy as np
+ import torch
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.utils import BaseOutput
+ from diffusers.utils.torch_utils import randn_tensor
+ from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
+
+
+ class Time_Windows():
+     def __init__(self, t_initial=1, t_terminal=0, num_windows=4, precision=1./1000) -> None:
+         assert t_terminal < t_initial
+         time_windows = [1. * i / num_windows for i in range(1, num_windows + 1)][::-1]
+
+         self.window_starts = time_windows                    # e.g. [1.0, 0.75, 0.5, 0.25]
+         self.window_ends = time_windows[1:] + [t_terminal]   # e.g. [0.75, 0.5, 0.25, 0]
+         self.precision = precision
+
+     def get_window(self, tp):
+         idx = 0
+         # robust to numerical error: a timepoint just above a window boundary is assigned to the window starting at that boundary
+         while (tp - 0.1 * self.precision) <= self.window_ends[idx]:
+             idx += 1
+         return self.window_starts[idx], self.window_ends[idx]
+
+     def lookup_window(self, timepoint):
+         if timepoint.dim() == 0:
+             t_start, t_end = self.get_window(timepoint)
+             t_start = torch.ones_like(timepoint) * t_start
+             t_end = torch.ones_like(timepoint) * t_end
+         else:
+             t_start = torch.zeros_like(timepoint)
+             t_end = torch.zeros_like(timepoint)
+             bsz = timepoint.shape[0]
+             for i in range(bsz):
+                 tp = timepoint[i]
+                 ts, te = self.get_window(tp)
+                 t_start[i] = ts
+                 t_end[i] = te
+         return t_start, t_end
+
+
+ @dataclass
+ class PeRFlowSchedulerOutput(BaseOutput):
+     """
+     Output class for the scheduler's `step` function output.
+
+     Args:
+         prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
+             Computed sample `(x_{t-1})` of the previous timestep. `prev_sample` should be used as the next model
+             input in the denoising loop.
+         pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
+             The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
+             `pred_original_sample` can be used to preview progress or for guidance.
+     """
+
+     prev_sample: torch.FloatTensor
+     pred_original_sample: Optional[torch.FloatTensor] = None
+
+
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
+ def betas_for_alpha_bar(
+     num_diffusion_timesteps,
+     max_beta=0.999,
+     alpha_transform_type="cosine",
+ ):
+     """
+     Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
+     (1-beta) over time from t = [0,1].
+
+     Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta)
+     up to that part of the diffusion process.
+
+     Args:
+         num_diffusion_timesteps (`int`): the number of betas to produce.
+         max_beta (`float`): the maximum beta to use; use values lower than 1 to
+                      prevent singularities.
+         alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
+                      Choose from `cosine` or `exp`.
+
+     Returns:
+         betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
+     """
+     if alpha_transform_type == "cosine":
+
+         def alpha_bar_fn(t):
+             return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
+
+     elif alpha_transform_type == "exp":
+
+         def alpha_bar_fn(t):
+             return math.exp(t * -12.0)
+
+     else:
+         raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
+
+     betas = []
+     for i in range(num_diffusion_timesteps):
+         t1 = i / num_diffusion_timesteps
+         t2 = (i + 1) / num_diffusion_timesteps
+         betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
+     return torch.tensor(betas, dtype=torch.float32)
+
+
+ class PeRFlowScheduler(SchedulerMixin, ConfigMixin):
+     """
+     `PeRFlowScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs)
+     with non-Markovian guidance.
+
+     This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
+     generic methods the library implements for all schedulers, such as loading and saving.
+
+     Args:
+         num_train_timesteps (`int`, defaults to 1000):
+             The number of diffusion steps to train the model.
+         beta_start (`float`, defaults to 0.00085):
+             The starting `beta` value of inference.
+         beta_end (`float`, defaults to 0.012):
+             The final `beta` value.
+         beta_schedule (`str`, defaults to `"scaled_linear"`):
+             The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
+             `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
+         trained_betas (`np.ndarray`, *optional*):
+             Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
+         set_alpha_to_one (`bool`, defaults to `False`):
+             Each diffusion step uses the alphas product value at that step and at the previous one. For the final
+             step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
+             otherwise it uses the alpha value at step 0.
+         prediction_type (`str`, defaults to `ddim_eps`, *optional*):
+             One of `ddim_eps`, `diff_eps`, or `velocity`.
+     """
+
+     _compatibles = [e.name for e in KarrasDiffusionSchedulers]
+     order = 1
+
+     @register_to_config
+     def __init__(
+         self,
+         num_train_timesteps: int = 1000,
+         beta_start: float = 0.00085,
+         beta_end: float = 0.012,
+         beta_schedule: str = "scaled_linear",
+         trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
+         set_alpha_to_one: bool = False,
+         prediction_type: str = "ddim_eps",
+         t_noise: float = 1,
+         t_clean: float = 0,
+         num_time_windows: int = 4,
+     ):
+         if trained_betas is not None:
+             self.betas = torch.tensor(trained_betas, dtype=torch.float32)
+         elif beta_schedule == "linear":
+             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
+         elif beta_schedule == "scaled_linear":
+             # this schedule is very specific to the latent diffusion model.
+             self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
+         elif beta_schedule == "squaredcos_cap_v2":
+             # Glide cosine schedule
+             self.betas = betas_for_alpha_bar(num_train_timesteps)
+         else:
+             raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
+
+         self.alphas = 1.0 - self.betas
+         self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+
+         # At every step in ddim, we are looking into the previous alphas_cumprod.
+         # For the final step, there is no previous alphas_cumprod because we are already at 0.
+         # `set_alpha_to_one` decides whether we set this parameter simply to one or
+         # whether we use the final alpha of the "non-previous" one.
+         self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
+
+         # standard deviation of the initial noise distribution
+         self.init_noise_sigma = 1.0
+
+         self.time_windows = Time_Windows(t_initial=t_noise, t_terminal=t_clean,
+                                          num_windows=num_time_windows,
+                                          precision=1./num_train_timesteps)
+
+         assert prediction_type in ["ddim_eps", "diff_eps", "velocity"]
+
+     def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
+         """
+         Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
+         current timestep.
+
+         Args:
+             sample (`torch.FloatTensor`):
+                 The input sample.
+             timestep (`int`, *optional*):
+                 The current timestep in the diffusion chain.
+
+         Returns:
+             `torch.FloatTensor`:
+                 A scaled input sample.
+         """
+         return sample
+
+     def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
+         """
+         Sets the discrete timesteps used for the diffusion chain (to be run before inference).
+
+         Args:
+             num_inference_steps (`int`):
+                 The number of diffusion steps used when generating samples with a pre-trained model.
+         """
+         if num_inference_steps < self.config.num_time_windows:
+             num_inference_steps = self.config.num_time_windows
+             print(f"### num_inference_steps should be at least num_time_windows; it has been set to {self.config.num_time_windows}.")
+
+         timesteps = []
+         for i in range(self.config.num_time_windows):
+             if i < num_inference_steps % self.config.num_time_windows:
+                 num_steps_cur_win = num_inference_steps // self.config.num_time_windows + 1
+             else:
+                 num_steps_cur_win = num_inference_steps // self.config.num_time_windows
+
+             t_s = self.time_windows.window_starts[i]
+             t_e = self.time_windows.window_ends[i]
+             timesteps_cur_win = np.linspace(t_s, t_e, num=num_steps_cur_win, endpoint=False)
+             timesteps.append(timesteps_cur_win)
+
+         timesteps = np.concatenate(timesteps)
+
+         self.timesteps = torch.from_numpy(
+             (timesteps * self.config.num_train_timesteps).astype(np.int64)
+         ).to(device)
+
+     def get_window_alpha(self, timepoints):
+         time_windows = self.time_windows
+         num_train_timesteps = self.config.num_train_timesteps
+
+         t_win_start, t_win_end = time_windows.lookup_window(timepoints)
+         t_win_len = t_win_end - t_win_start
+         t_interval = timepoints - t_win_start  # NOTE: negative value
+
+         idx_start = (t_win_start * num_train_timesteps - 1).long()
+         alphas_cumprod_start = self.alphas_cumprod[idx_start]
+
+         idx_end = torch.clamp((t_win_end * num_train_timesteps - 1).long(), min=0)  # FIXME:
+         alphas_cumprod_end = self.alphas_cumprod[idx_end]
+
+         alpha_cumprod_s_e = alphas_cumprod_start / alphas_cumprod_end
+         gamma_s_e = alpha_cumprod_s_e ** 0.5
+
+         return t_win_start, t_win_end, t_win_len, t_interval, gamma_s_e, alphas_cumprod_start, alphas_cumprod_end
+
+     def step(
+         self,
+         model_output: torch.FloatTensor,
+         timestep: int,
+         sample: torch.FloatTensor,
+         return_dict: bool = True,
+     ) -> Union[PeRFlowSchedulerOutput, Tuple]:
+         """
+         Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
+         process from the learned model outputs (most often the predicted noise).
+
+         Args:
+             model_output (`torch.FloatTensor`):
+                 The direct output from the learned diffusion model.
+             timestep (`float`):
+                 The current discrete timestep in the diffusion chain.
+             sample (`torch.FloatTensor`):
+                 A current instance of a sample created by the diffusion process.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`PeRFlowSchedulerOutput`] or `tuple`.
+
+         Returns:
+             [`PeRFlowSchedulerOutput`] or `tuple`:
+                 If return_dict is `True`, [`PeRFlowSchedulerOutput`] is returned, otherwise a tuple is returned
+                 where the first element is the sample tensor.
+         """
+         if self.config.prediction_type == "ddim_eps":
+             pred_epsilon = model_output
+             t_c = timestep / self.config.num_train_timesteps
+             t_s, t_e, _, c_to_s, _, alphas_cumprod_start, alphas_cumprod_end = self.get_window_alpha(t_c)
+
+             lambda_s = (alphas_cumprod_end / alphas_cumprod_start) ** 0.5
+             eta_s = (1 - alphas_cumprod_end) ** 0.5 - (alphas_cumprod_end / alphas_cumprod_start * (1 - alphas_cumprod_start)) ** 0.5
+
+             lambda_t = (lambda_s * (t_e - t_s)) / (lambda_s * (t_c - t_s) + (t_e - t_c))
+             eta_t = (eta_s * (t_e - t_c)) / (lambda_s * (t_c - t_s) + (t_e - t_c))
+
+             pred_win_end = lambda_t * sample + eta_t * pred_epsilon
+             pred_velocity = (pred_win_end - sample) / (t_e - (t_s + c_to_s))
+
+         # elif self.config.prediction_type == "diff_eps":
+         #     pred_epsilon = model_output
+         #     t_c = timestep / self.config.num_train_timesteps
+         #     t_s, t_e, win_len, c_to_s, gamma_s_e, _, _ = self.get_window_alpha(t_c)
+         #     pred_sample_end = ( sample - (1-c_to_s/win_len) * ((1-gamma_s_e**2)**0.5) * pred_epsilon ) \
+         #         / ( gamma_s_e + c_to_s / win_len * (1-gamma_s_e) )
+         #     pred_velocity = (pred_sample_end - sample) / (t_e - (t_s + c_to_s))
+
+         elif self.config.prediction_type == "diff_eps":
+             pred_epsilon = model_output
+             t_c = timestep / self.config.num_train_timesteps
+             t_s, t_e, _, c_to_s, gamma_s_e, _, _ = self.get_window_alpha(t_c)
+
+             lambda_s = 1 / gamma_s_e
+             eta_s = -1 * (1 - gamma_s_e**2) ** 0.5 / gamma_s_e
+
+             lambda_t = (lambda_s * (t_e - t_s)) / (lambda_s * (t_c - t_s) + (t_e - t_c))
+             eta_t = (eta_s * (t_e - t_c)) / (lambda_s * (t_c - t_s) + (t_e - t_c))
+
+             pred_win_end = lambda_t * sample + eta_t * pred_epsilon
+             pred_velocity = (pred_win_end - sample) / (t_e - (t_s + c_to_s))
+
+         elif self.config.prediction_type == "velocity":
+             pred_velocity = model_output
+         else:
+             raise ValueError(
+                 f"prediction_type given as {self.config.prediction_type} must be one of `ddim_eps`, `diff_eps`, or `velocity`."
+             )
+
+         # get dt
+         idx = torch.argwhere(torch.where(self.timesteps == timestep, 1, 0))
+         prev_step = self.timesteps[idx + 1] if (idx + 1) < len(self.timesteps) else 0
+         dt = (prev_step - timestep) / self.config.num_train_timesteps
+         dt = dt.to(sample.device, sample.dtype)
+
+         prev_sample = sample + dt * pred_velocity
+
+         if not return_dict:
+             return (prev_sample,)
+         return PeRFlowSchedulerOutput(prev_sample=prev_sample, pred_original_sample=None)
+
+     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
+     def add_noise(
+         self,
+         original_samples: torch.FloatTensor,
+         noise: torch.FloatTensor,
+         timesteps: torch.IntTensor,
+     ) -> torch.FloatTensor:
+         # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
+         alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
+         timesteps = timesteps.to(original_samples.device) - 1  # indexing from 0
+
+         sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
+         sqrt_alpha_prod = sqrt_alpha_prod.flatten()
+         while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
+             sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
+
+         sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
+         sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
+         while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
+             sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
+
+         noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
+         return noisy_samples
+
+     def __len__(self):
+         return self.config.num_train_timesteps
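
A minimal sketch of how PeRFlowScheduler is typically swapped into a diffusers pipeline (not part of the commit). The base model id and prompt are placeholders, the UNet is assumed to already carry PeRFlow weights (see utils_perflow.py below), and prediction_type must match how those weights were trained (ddim_eps, diff_eps, or velocity):

import torch
from diffusers import StableDiffusionPipeline
from src.scheduler_perflow import PeRFlowScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
).to("cuda")

# replace the default scheduler; extra config keys from the old scheduler are ignored by from_config
pipe.scheduler = PeRFlowScheduler.from_config(
    pipe.scheduler.config, prediction_type="ddim_eps", num_time_windows=4
)

# set_timesteps distributes the steps over the time windows, so use at least one step per window
image = pipe("a photo of a cat", num_inference_steps=4, guidance_scale=4.5).images[0]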
src/utils_perflow.py ADDED
@@ -0,0 +1,77 @@
+ import os
+ from collections import OrderedDict
+ import torch
+ from safetensors import safe_open
+ from safetensors.torch import save_file
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
+ from diffusers.pipelines.stable_diffusion.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_vae_checkpoint, convert_ldm_clip_checkpoint
+
+
+ def merge_delta_weights_into_unet(pipe, delta_weights):
+     unet_weights = pipe.unet.state_dict()
+     assert unet_weights.keys() == delta_weights.keys()
+     for key in delta_weights.keys():
+         dtype = unet_weights[key].dtype
+         unet_weights[key] = unet_weights[key].to(dtype=delta_weights[key].dtype) + delta_weights[key].to(device=unet_weights[key].device)
+         unet_weights[key] = unet_weights[key].to(dtype)
+     pipe.unet.load_state_dict(unet_weights, strict=True)
+     return pipe
+
+
+ def load_delta_weights_into_unet(
+     pipe,
+     model_path="hsyan/piecewise-rectified-flow-v0-1",
+     base_path="runwayml/stable-diffusion-v1-5",
+ ):
+     ## load the delta weights
+     if os.path.exists(os.path.join(model_path, "delta_weights.safetensors")):
+         print("### delta_weights exists, loading...")
+         delta_weights = OrderedDict()
+         with safe_open(os.path.join(model_path, "delta_weights.safetensors"), framework="pt", device="cpu") as f:
+             for key in f.keys():
+                 delta_weights[key] = f.get_tensor(key)
+
+     elif os.path.exists(os.path.join(model_path, "diffusion_pytorch_model.safetensors")):
+         print("### merged_weights exists, loading...")
+         merged_weights = OrderedDict()
+         with safe_open(os.path.join(model_path, "diffusion_pytorch_model.safetensors"), framework="pt", device="cpu") as f:
+             for key in f.keys():
+                 merged_weights[key] = f.get_tensor(key)
+
+         base_weights = StableDiffusionPipeline.from_pretrained(
+             base_path, torch_dtype=torch.float16, safety_checker=None).unet.state_dict()
+         assert base_weights.keys() == merged_weights.keys()
+
+         delta_weights = OrderedDict()
+         for key in merged_weights.keys():
+             delta_weights[key] = merged_weights[key] - base_weights[key].to(device=merged_weights[key].device, dtype=merged_weights[key].dtype)
+
+         print("### saving delta_weights...")
+         save_file(delta_weights, os.path.join(model_path, "delta_weights.safetensors"))
+
+     else:
+         raise ValueError(f"{model_path} does not contain delta weights or merged weights")
+
+     ## merge the delta weights into the target pipeline
+     pipe = merge_delta_weights_into_unet(pipe, delta_weights)
+     return pipe
+
+
+ def load_dreambooth_into_pipeline(pipe, sd_dreambooth):
+     assert sd_dreambooth.endswith(".safetensors")
+     state_dict = {}
+     with safe_open(sd_dreambooth, framework="pt", device="cpu") as f:
+         for key in f.keys():
+             state_dict[key] = f.get_tensor(key)
+
+     unet_config = {}  # unet, line 449 in convert_ldm_unet_checkpoint
+     for key in pipe.unet.config.keys():
+         if key != 'num_class_embeds':
+             unet_config[key] = pipe.unet.config[key]
+
+     pipe.unet.load_state_dict(convert_ldm_unet_checkpoint(state_dict, unet_config), strict=False)
+     pipe.vae.load_state_dict(convert_ldm_vae_checkpoint(state_dict, pipe.vae.config))
+     pipe.text_encoder = convert_ldm_clip_checkpoint(state_dict, text_encoder=pipe.text_encoder)
+     return pipe
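
A minimal sketch tying the pieces of this commit together (not part of the commit). The local directory name is a placeholder and must already contain delta_weights.safetensors (or the merged diffusion_pytorch_model.safetensors) from a PeRFlow release; prediction_type again has to match the downloaded weights:

import torch
from diffusers import StableDiffusionPipeline
from src.scheduler_perflow import PeRFlowScheduler
from src.utils_perflow import load_delta_weights_into_unet

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
)

# merge the PeRFlow delta weights into the base UNet, then switch to the PeRFlow scheduler
pipe = load_delta_weights_into_unet(pipe, model_path="./piecewise-rectified-flow-v0-1")
pipe.scheduler = PeRFlowScheduler.from_config(pipe.scheduler.config, prediction_type="ddim_eps")
pipe = pipe.to("cuda")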