dikdimon committed
Commit 2fbbd91 · verified · 1 parent: 236682d

Update webUI_ExtraSchedulers/scripts/gradient_estimation.py

webUI_ExtraSchedulers/scripts/gradient_estimation.py CHANGED
@@ -1,71 +1,97 @@
- ## lifted from ReForge, original implementation from Comfy
- ## CFG++ attempt by me
-
- import torch
- from tqdm.auto import trange
-
-
- # copied from kdiffusion/sampling.py
- def to_d(x, sigma, denoised):
-     """Converts a denoiser output to a Karras ODE derivative."""
-     return (x - denoised) / append_dims(sigma, x.ndim)
- def append_dims(x, target_dims):
-     """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
-     dims_to_append = target_dims - x.ndim
-     if dims_to_append < 0:
-         raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
-     return x[(...,) + (None,) * dims_to_append]
-
-
- @torch.no_grad()
- def sample_gradient_e(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
-     """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
-     extra_args = {} if extra_args is None else extra_args
-     s_in = x.new_ones([x.shape[0]])
-     old_d = None
-
-     sigmas = sigmas.to(x.device)
-
-     for i in trange(len(sigmas) - 1, disable=disable):
-         denoised = model(x, sigmas[i] * s_in, **extra_args)
-
-         d = to_d(x, sigmas[i], denoised)
-         if callback is not None:
-             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
-         dt = sigmas[i + 1] - sigmas[i]
-         if i == 0: # Euler method
-             x = x + d * dt
-         else:
-             # Gradient estimation
-             d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
-             x = x + d_bar * dt
-         old_d = d
-     return x
-
-
- @torch.no_grad()
- def sample_gradient_e_cfgpp(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
-     """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
-     extra_args = {} if extra_args is None else extra_args
-     s_in = x.new_ones([x.shape[0]])
-     old_d = None
-
-     model.need_last_noise_uncond = True
-     model.inner_model.inner_model.forge_objects.unet.model_options["disable_cfg1_optimization"] = True
-
-     for i in trange(len(sigmas) - 1, disable=disable):
-         denoised = model(x, sigmas[i] * s_in, **extra_args)
-
-         d = model.last_noise_uncond
-
-         if callback is not None:
-             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
-         dt = sigmas[i + 1] - sigmas[i]
-         if i == 0: # Euler method
-             x = denoised + d * sigmas[i+1]
-         else:
-             # Gradient estimation
-             d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
-             x = denoised + d_bar * sigmas[i+1]
-         old_d = d
-     return x
+ ## lifted from ReForge, original implementation from Comfy
+ ## CFG++ attempt by me
+
+ import torch
+ from tqdm.auto import trange
+
+ from k_diffusion.sampling import to_d
+
+
+ @torch.no_grad()
+ def sample_gradient_e(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
+     """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
+     extra_args = {} if extra_args is None else extra_args
+     s_in = x.new_ones([x.shape[0]])
+     old_d = None
+
+     sigmas = sigmas.to(x.device)
+
+     for i in trange(len(sigmas) - 1, disable=disable):
+         denoised = model(x, sigmas[i] * s_in, **extra_args)
+
+         d = to_d(x, sigmas[i], denoised)
+         if callback is not None:
+             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+         dt = sigmas[i + 1] - sigmas[i]
+         if i == 0: # Euler method
+             x.addcmul_(d, dt)
+         else:
+             # Gradient estimation
+             d_bar = ge_gamma * (d - old_d) + old_d
+             x.addcmul_(d_bar, dt)
+         old_d = d
+     return x
+
+
+ @torch.no_grad()
+ def sample_gradient_e_cfgpp(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
+     """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
+     extra_args = {} if extra_args is None else extra_args
+     s_in = x.new_ones([x.shape[0]])
+     old_d = None
+
+     model.need_last_noise_uncond = True
+     model.inner_model.inner_model.forge_objects.unet.model_options["disable_cfg1_optimization"] = True
+
+     for i in trange(len(sigmas) - 1, disable=disable):
+         denoised = model(x, sigmas[i] * s_in, **extra_args)
+
+         d = model.last_noise_uncond
+
+         if callback is not None:
+             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+         if i == 0: # Euler method
+             x = torch.addcmul(denoised, d, sigmas[i+1])
+         else:
+             # Gradient estimation
+             d_bar = ge_gamma * (d - old_d) + old_d
+             x = torch.addcmul(denoised, d_bar, sigmas[i+1])
+         old_d = d
+     return x
+
+
+ @torch.no_grad()
+ def sample_gradient_e_2s_cfgpp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., ge_gamma=2.):
+     """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
+     extra_args = {} if extra_args is None else extra_args
+     s_in = x.new_ones([x.shape[0]])
+     old_d = None
+
+     model.need_last_noise_uncond = True
+     model.inner_model.inner_model.forge_objects.unet.model_options["disable_cfg1_optimization"] = True
+
+     for i in trange(len(sigmas) - 1, disable=disable):
+         denoised = model(x, sigmas[i] * s_in, **extra_args)
+         sigma_mid = 0.5 * (sigmas[i] + sigmas[i+1])
+
+         d = model.last_noise_uncond
+
+         if callback is not None:
+             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+         if i == 0: # Euler method
+             x = denoised + d * sigmas[i+1]
+         else:
+             # Gradient estimation
+             d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
+             x_2 = denoised + d_bar * sigma_mid
+             old_d = d
+             denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
+             d = model.last_noise_uncond
+             d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
+             x = denoised_2 + d * sigmas[i+1]
+
+         old_d = d
+
+     return x
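
A note on the arithmetic in this commit: the rewritten blend ge_gamma * (d - old_d) + old_d is algebraically identical to the old ge_gamma * d + (1 - ge_gamma) * old_d, and addcmul_ simply fuses the multiply-add into one kernel (in place for the first sampler, which therefore now mutates the caller's x). A minimal sketch checking both equivalences, with illustrative tensor values:

import torch

ge_gamma = 2.0
d, old_d = torch.randn(4), torch.randn(4)

# Refactored blend vs. the old formulation: equal up to float rounding.
assert torch.allclose(ge_gamma * (d - old_d) + old_d,
                      ge_gamma * d + (1 - ge_gamma) * old_d)

# x.addcmul_(d, dt) computes x += d * dt in place, without a temporary tensor.
x = torch.randn(4)
dt = torch.tensor(-0.5)
expected = x + d * dt
x.addcmul_(d, dt)
assert torch.allclose(x, expected)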
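The cfgpp samplers rest on a rewritten form of the Euler step: since d = (x - denoised) / sigma_i, the usual update x + d * (sigma_next - sigma_i) simplifies to denoised + d * sigma_next, and the samplers then substitute the unconditional noise prediction (model.last_noise_uncond) for d in that form. A small numeric check of the identity, with illustrative values:

import torch

sigma_i, sigma_next = torch.tensor(5.0), torch.tensor(3.0)
x, denoised = torch.randn(4), torch.randn(4)

d = (x - denoised) / sigma_i              # Karras ODE derivative, as in to_d
euler = x + d * (sigma_next - sigma_i)    # standard Euler step
rewritten = denoised + d * sigma_next     # form used by the cfgpp samplers
assert torch.allclose(euler, rewritten, atol=1e-5)

The new sample_gradient_e_2s_cfgpp applies this step twice per iteration: it first moves to the midpoint sigma_mid = 0.5 * (sigmas[i] + sigmas[i+1]), re-evaluates the model there, then steps to sigmas[i+1], trading a second model call per step for a midpoint-style update. Note that its eta and s_noise parameters are accepted but unused in this version.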
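For a standalone smoke test, sample_gradient_e accepts any callable following the k-diffusion convention model(x, sigma, **extra_args) -> denoised. The toy denoiser, linear sigma schedule, and import path below are illustrative assumptions, not part of the repository (the script itself also needs the k_diffusion package installed for its to_d import):

import torch
from gradient_estimation import sample_gradient_e  # hypothetical import path

def toy_denoiser(x, sigma, **extra_args):
    # Stand-in for a real diffusion model: shrinks x toward 0 as sigma grows.
    return x / (1.0 + sigma.view(-1, 1, 1, 1) ** 2)

x = torch.randn(2, 4, 8, 8)              # latent-shaped starting noise
sigmas = torch.linspace(10.0, 0.0, 21)   # simple linear schedule down to 0
out = sample_gradient_e(toy_denoiser, x, sigmas, ge_gamma=2.0)
print(out.shape)  # torch.Size([2, 4, 8, 8])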