karimbenharrak committed on
Commit
56e3148
1 Parent(s): 02f29e1

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +27 -6
handler.py CHANGED
@@ -14,21 +14,25 @@ if device.type != 'cuda':
14
 
15
  class EndpointHandler():
16
  def __init__(self, path=""):
 
 
 
 
17
  # load StableDiffusionInpaintPipeline pipeline
18
- self.pipe = AutoPipelineForInpainting.from_pretrained(
19
  "runwayml/stable-diffusion-inpainting",
20
  revision="fp16",
21
  torch_dtype=torch.float16,
22
  )
23
  # use DPMSolverMultistepScheduler
24
- self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
25
  # move to device
26
- self.pipe = self.pipe.to(device)
27
 
28
- self.pipe2 = AutoPipelineForInpainting.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
29
- self.pipe2.to("cuda")
30
 
31
- self.pipe3 = AutoPipelineForImage2Image.from_pipe(self.pipe2)
32
 
33
 
34
 
@@ -52,6 +56,22 @@ class EndpointHandler():
52
  image = None
53
  mask_image = None
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  self.pipe.enable_xformers_memory_efficient_attention()
56
 
57
  # run inference pipeline
@@ -94,6 +114,7 @@ class EndpointHandler():
94
 
95
  # return first generate PIL image
96
  return result
 
97
 
98
  # helper to decode input image
99
  def decode_base64_image(self, image_string):
 
14
 
15
  class EndpointHandler():
16
  def __init__(self, path=""):
17
+
18
+ self.pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
19
+ self.generator = torch.Generator(device="cuda").manual_seed(0)
20
+
21
  # load StableDiffusionInpaintPipeline pipeline
22
+ #self.pipe = AutoPipelineForInpainting.from_pretrained(
23
  "runwayml/stable-diffusion-inpainting",
24
  revision="fp16",
25
  torch_dtype=torch.float16,
26
  )
27
  # use DPMSolverMultistepScheduler
28
+ #self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
29
  # move to device
30
+ #self.pipe = self.pipe.to(device)
31
 
32
+ #self.pipe2 = AutoPipelineForInpainting.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
33
+ #self.pipe2.to("cuda")
34
 
35
+ #self.pipe3 = AutoPipelineForImage2Image.from_pipe(self.pipe2)
36
 
37
 
38
 
 
56
  image = None
57
  mask_image = None
58
 
59
+
60
+ image = self.pipe(
61
+ prompt=prompt,
62
+ image=image,
63
+ mask_image=mask_image,
64
+ guidance_scale=8.0,
65
+ num_inference_steps=20, # steps between 15 and 30 work well for us
66
+ strength=0.99, # make sure to use `strength` below 1.0
67
+ generator=generator,
68
+ ).images[0]
69
+
70
+ return image
71
+
72
+ """
73
+ pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
74
+
75
  self.pipe.enable_xformers_memory_efficient_attention()
76
 
77
  # run inference pipeline
 
114
 
115
  # return first generate PIL image
116
  return result
117
+ """
118
 
119
  # helper to decode input image
120
  def decode_base64_image(self, image_string):