Spaces:
Runtime error
Runtime error
cocktailpeanut
committed on
Commit
·
23a7ae2
1
Parent(s):
aa247f2
update
Browse files
app.py
CHANGED
@@ -20,7 +20,7 @@ from torch import autocast
|
|
20 |
import cv2
|
21 |
import imageio
|
22 |
import devicetorch
|
23 |
-
|
24 |
|
25 |
sys.path.append("./stable_diffusion")
|
26 |
|
@@ -85,7 +85,7 @@ class CompVisDenoiser(K.external.CompVisDenoiser):
|
|
85 |
def forward(self, input_0, input_1, sigma, **kwargs):
|
86 |
c_out, c_in = [append_dims(x, input_0.ndim) for x in self.get_scalings(sigma)]
|
87 |
# eps_0, eps_1 = self.get_eps(input_0 * c_in, input_1 * c_in, self.sigma_to_t(sigma), **kwargs)
|
88 |
-
eps_0, eps_1 = self.get_eps(input_0 * c_in, self.sigma_to_t(sigma.cpu().float()).to(
|
89 |
|
90 |
return input_0 + eps_0 * c_out, eps_1
|
91 |
|
@@ -122,7 +122,6 @@ def sample_euler_ancestral(model, x_0, x_1, sigmas, height, width, extra_args=No
|
|
122 |
|
123 |
mask_list = []
|
124 |
image_list = []
|
125 |
-
model.to(device)
|
126 |
for i in trange(len(sigmas) - 1, disable=disable):
|
127 |
denoised_0, denoised_1 = model(x_0, x_1, sigmas[i] * s_in, **extra_args)
|
128 |
image_list.append(denoised_0)
|
@@ -153,7 +152,7 @@ args = parser.parse_args()
|
|
153 |
|
154 |
config = OmegaConf.load(args.config)
|
155 |
model = load_model_from_config(config, args.ckpt, args.vae_ckpt)
|
156 |
-
model.eval().to(
|
157 |
model_wrap = CompVisDenoiser(model)
|
158 |
model_wrap_cfg = CFGDenoiser(model_wrap)
|
159 |
null_token = model.get_learned_conditioning([""])
|
@@ -186,8 +185,8 @@ def generate(
|
|
186 |
if instruction == "":
|
187 |
return [input_image, seed]
|
188 |
|
189 |
-
model.to(
|
190 |
-
if
|
191 |
with torch.no_grad(), autocast("cuda"), model.ema_scope():
|
192 |
return run(model, input_image, input_image_copy, width, height, instruction, steps, seed)
|
193 |
else:
|
|
|
20 |
import cv2
|
21 |
import imageio
|
22 |
import devicetorch
|
23 |
+
DEVICE = devicetorch.get(torch)
|
24 |
|
25 |
sys.path.append("./stable_diffusion")
|
26 |
|
|
|
85 |
def forward(self, input_0, input_1, sigma, **kwargs):
|
86 |
c_out, c_in = [append_dims(x, input_0.ndim) for x in self.get_scalings(sigma)]
|
87 |
# eps_0, eps_1 = self.get_eps(input_0 * c_in, input_1 * c_in, self.sigma_to_t(sigma), **kwargs)
|
88 |
+
eps_0, eps_1 = self.get_eps(input_0 * c_in, self.sigma_to_t(sigma.cpu().float()).to(DEVICE), **kwargs)
|
89 |
|
90 |
return input_0 + eps_0 * c_out, eps_1
|
91 |
|
|
|
122 |
|
123 |
mask_list = []
|
124 |
image_list = []
|
|
|
125 |
for i in trange(len(sigmas) - 1, disable=disable):
|
126 |
denoised_0, denoised_1 = model(x_0, x_1, sigmas[i] * s_in, **extra_args)
|
127 |
image_list.append(denoised_0)
|
|
|
152 |
|
153 |
config = OmegaConf.load(args.config)
|
154 |
model = load_model_from_config(config, args.ckpt, args.vae_ckpt)
|
155 |
+
model.eval().to(DEVICE)
|
156 |
model_wrap = CompVisDenoiser(model)
|
157 |
model_wrap_cfg = CFGDenoiser(model_wrap)
|
158 |
null_token = model.get_learned_conditioning([""])
|
|
|
185 |
if instruction == "":
|
186 |
return [input_image, seed]
|
187 |
|
188 |
+
model.to(DEVICE)
|
189 |
+
if DEVICE == "cuda":
|
190 |
with torch.no_grad(), autocast("cuda"), model.ema_scope():
|
191 |
return run(model, input_image, input_image_copy, width, height, instruction, steps, seed)
|
192 |
else:
|