mokady committed on
Commit
73e4bf0
1 Parent(s): 24d8b2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -7
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
2
  from diffusers.utils import load_image
3
  from PIL import Image
@@ -28,7 +29,7 @@ pipe.scheduler = EulerAncestralDiscreteScheduler(
28
  steps_offset=1
29
  )
30
  # pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
31
- pipe.enable_xformers_memory_efficient_attention()
32
  pipe.force_zeros_for_empty_prompt = False
33
 
34
  from transformers import DPTFeatureExtractor, DPTForDepthEstimation
@@ -65,21 +66,29 @@ def get_depth_map(image):
65
  return image
66
 
67
 
 
 
 
 
 
 
 
 
 
 
68
  def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
69
- generator = torch.manual_seed(seed)
70
 
71
  # resize input_image to 1024x1024
72
  input_image = resize_image(input_image)
73
 
74
  depth_image = get_depth_map(input_image)
75
 
76
- images = pipe(
77
- prompt, negative_prompt=negative_prompt, image=depth_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
78
- generator=generator,
79
- ).images
80
 
81
  return [depth_image, images[0]]
82
-
 
 
83
  block = gr.Blocks().queue()
84
 
85
  with block:
 
1
+ import spaces
2
  from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
3
  from diffusers.utils import load_image
4
  from PIL import Image
 
29
  steps_offset=1
30
  )
31
  # pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
32
+ # pipe.enable_xformers_memory_efficient_attention()
33
  pipe.force_zeros_for_empty_prompt = False
34
 
35
  from transformers import DPTFeatureExtractor, DPTForDepthEstimation
 
66
  return image
67
 
68
 
69
+ @spaces.GPU
70
+ def generate_(prompt, negative_prompt, canny_image, num_steps, controlnet_conditioning_scale, seed):
71
+ generator = torch.Generator("cuda").manual_seed(seed)
72
+ images = pipe(
73
+ prompt, negative_prompt=negative_prompt, image=canny_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
74
+ generator=generator,
75
+ ).images
76
+ return images
77
+
78
+ @spaces.GPU
79
  def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
 
80
 
81
  # resize input_image to 1024x1024
82
  input_image = resize_image(input_image)
83
 
84
  depth_image = get_depth_map(input_image)
85
 
86
+ images = generate_(prompt, negative_prompt, depth_image, num_steps, controlnet_conditioning_scale, seed)
 
 
 
87
 
88
  return [depth_image, images[0]]
89
+
90
+
91
+
92
  block = gr.Blocks().queue()
93
 
94
  with block: