
kadirnar committed
Commit ba59f3d
1 Parent(s): 8e4e52b

Update demo_gradio.py
Files changed (1)
  1. demo_gradio.py  +3 -3
demo_gradio.py CHANGED

@@ -26,7 +26,7 @@ import glob
 import torch
 import cv2
 import argparse
-
+from diffusers.models.attention_processor import AttnProcessor2_0
 import DPT.util.io
 
 from torchvision.transforms import Compose
@@ -55,7 +55,7 @@ pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(
     add_watermarker=False,
 ).to(device)
 pipe.unet = register_cross_attention_hook(pipe.unet)
-
+pipe.unet.set_attn_processor(AttnProcessor2_0())
 ip_model = IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device)
 
 
@@ -161,7 +161,7 @@ def greet(input_image, material_exemplar):
 
 
     num_samples = 1
-    images = ip_model.generate(guidance_scale=2, pil_image=ip_image, image=init_img, control_image=depth_map, mask_image=mask, controlnet_conditioning_scale=0.9, num_samples=num_samples, num_inference_steps=6, seed=42)
+    images = ip_model.generate(guidance_scale=2, pil_image=ip_image, image=init_img, control_image=depth_map, mask_image=mask, controlnet_conditioning_scale=0.9, num_samples=num_samples, num_inference_steps=4, seed=42)
 
     return images[0]
 
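
In sum, the commit imports AttnProcessor2_0, switches the UNet's attention processors to PyTorch 2.0 scaled-dot-product attention right after register_cross_attention_hook, and lowers num_inference_steps in the ip_model.generate call from 6 to 4. Below is a minimal sketch of what the new set_attn_processor line does; a tiny randomly initialised UNet2DConditionModel stands in for pipe.unet so the snippet runs on its own, which is an assumption made purely for illustration.

# Minimal sketch: apply AttnProcessor2_0 to a diffusers UNet.
# A tiny randomly initialised UNet2DConditionModel is used here instead of the
# Space's pipe.unet (assumption for illustration only).
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0

unet = UNet2DConditionModel(
    sample_size=8,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
    cross_attention_dim=32,
)

# Replace every attention processor on the UNet with the PyTorch 2.0
# scaled-dot-product-attention implementation (requires torch >= 2.0).
unet.set_attn_processor(AttnProcessor2_0())

# Every processor is now an AttnProcessor2_0 instance.
print({type(p).__name__ for p in unet.attn_processors.values()})

AttnProcessor2_0 dispatches attention to torch.nn.functional.scaled_dot_product_attention, which is generally faster and more memory-efficient than the default processor on PyTorch 2.x.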