karimbenharrak committed on
Commit a93f6d3
1 Parent(s): 0a096db

Update handler.py

Files changed (1)
  1. handler.py +37 -6
handler.py CHANGED
@@ -1,10 +1,11 @@
 from typing import Dict, List, Any
 import torch
-from diffusers import DPMSolverMultistepScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DDIMScheduler, StableDiffusionInpaintPipeline, AutoPipelineForInpainting, AutoPipelineForImage2Image, DiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, ControlNetModel
+from diffusers import DPMSolverMultistepScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DDIMScheduler, StableDiffusionInpaintPipeline, AutoPipelineForInpainting, AutoPipelineForImage2Image, DiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, ControlNetModel, StableDiffusionPipeline
 from PIL import Image
 import base64
 from io import BytesIO
 import numpy as np
+import cv2
 
 # set device
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
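
Note: the new import cv2 makes the endpoint depend on OpenCV, which is not bundled with a bare handler. Presumably a requirements.txt entry (a companion change, not part of this diff) is needed:

# requirements.txt (assumed companion change; not shown in this commit)
opencv-python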
@@ -24,13 +25,27 @@ class EndpointHandler():
         # )
         # self.smooth_pipe.to("cuda")
 
-
+        self.canny_pipe = StableDiffusionPipeline.from_pretrained(
+            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
+        )
+        self.canny_pipe = self.canny_pipe.to("cuda")
+
+        self.controlnets = [
+            ControlNetModel.from_pretrained(
+                "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True
+            ),
+            ControlNetModel.from_pretrained(
+                "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
+            )
+        ]
+        """
         self.controlnet = ControlNetModel.from_pretrained(
             "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
         )
+        """
 
         self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, torch_dtype=torch.float16
+            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnets, torch_dtype=torch.float16
         )
 
         self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
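
Review note: self.controlnets mixes an SDXL checkpoint (diffusers/controlnet-canny-sdxl-1.0) with an SD 1.5 base (runwayml/stable-diffusion-v1-5). SDXL ControlNets are trained against the SDXL UNet and will not work inside an SD 1.5 pipeline. A minimal sketch of a compatible pairing, assuming the SD 1.5 canny checkpoint is the intended substitute:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline

# Both ControlNets here match the SD 1.5 UNet; lllyasviel/control_v11p_sd15_canny
# replaces the SDXL canny checkpoint used in the commit.
controlnets = [
    ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16
    ),
    ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
    ),
]
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnets, torch_dtype=torch.float16
)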
@@ -98,6 +113,8 @@ class EndpointHandler():
         else:
             image = None
             mask_image = None
+
+
         """
         if(method == "fast"):
             image = self.fast_pipe(
@@ -156,17 +173,31 @@ class EndpointHandler():
 
         control_image = self.make_inpaint_condition(image, mask_image)
 
+        low_threshold = 100
+        high_threshold = 200
+
+        # generate a first version of the prompt for the canny image
+        gen_canny_img = self.canny_pipe(prompt)
+
+        help_image = cv2.Canny(image, low_threshold, high_threshold)
+        help_image = help_image[:, :, None]
+        help_image = np.concatenate([help_image, help_image, help_image], axis=2)
+        canny_image = Image.fromarray(image)
+
+        input_images = [canny_image.resize((1024, 1024)), image.resize((1024, 1024))]
+
         # generate image
         image = self.pipe(
             prompt=prompt,
             negative_prompt=negative_prompt,
             num_inference_steps=num_inference_steps,
             eta=1.0,
-            image=image,
+            image=input_images,
             mask_image=mask_image,
             control_image=control_image,
-            guidance_scale=guidance_scale,
-            strength=strength
+            # guidance_scale=guidance_scale,
+            strength=strength,
+            controlnet_conditioning_scale=[0.8, 1.0]
         ).images[0]
 
         return image
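
Review note: the added preprocessing block has several likely bugs: cv2.Canny expects a NumPy array but receives a PIL image; canny_image is rebuilt from the original image rather than from the edge map help_image; gen_canny_img is computed but never used; and StableDiffusionControlNetInpaintPipeline takes the single init image in image and the per-ControlNet conditioning images in control_image (one per loaded ControlNet), not a list in image. A hedged sketch of the presumably intended call, assuming image and mask_image are PIL images inside the handler's __call__:

import cv2
import numpy as np
from PIL import Image

low_threshold, high_threshold = 100, 200

# Canny works on arrays; stack the single-channel edge map to 3 channels.
edges = cv2.Canny(np.array(image), low_threshold, high_threshold)
edges = np.concatenate([edges[:, :, None]] * 3, axis=2)
canny_image = Image.fromarray(edges)  # from the edge map, not the original image

# One conditioning image per ControlNet, in the same order as self.controlnets;
# all conditioning images should share the pipeline resolution. The init image
# stays a single image in `image`.
image = self.pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=num_inference_steps,
    eta=1.0,
    image=image,
    mask_image=mask_image,
    control_image=[canny_image, control_image],
    strength=strength,
    controlnet_conditioning_scale=[0.8, 1.0],
).images[0]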
 
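For reference, self.make_inpaint_condition(image, mask_image) is called above but defined outside the changed hunks; the diffusers ControlNet-inpaint examples use a helper of the same name, presumably equivalent to:

import numpy as np
import torch

def make_inpaint_condition(image, image_mask):
    # Normalize image and mask to [0, 1]; mark masked pixels with -1 so the
    # inpaint ControlNet knows which region to regenerate.
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
    assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
    image[image_mask > 0.5] = -1.0
    image = image[None].transpose(0, 3, 1, 2)
    return torch.from_numpy(image)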