karimbenharrak committed
Commit 14da090
1 Parent(s): 0beb34a

Update handler.py

Files changed (1):
  1. handler.py +28 -34
handler.py CHANGED
@@ -15,10 +15,10 @@ if device.type != 'cuda':
 class EndpointHandler():
     def __init__(self, path=""):
 
-        self.pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
+        self.fast_pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
         self.generator = torch.Generator(device="cuda").manual_seed(0)
 
-        """
+
         # load StableDiffusionInpaintPipeline pipeline
         self.pipe = AutoPipelineForInpainting.from_pretrained(
             "runwayml/stable-diffusion-inpainting",
@@ -34,7 +34,6 @@ class EndpointHandler():
         self.pipe2.to("cuda")
 
         self.pipe3 = AutoPipelineForImage2Image.from_pipe(self.pipe2)
-        """
 
 
 
@@ -49,6 +48,11 @@ class EndpointHandler():
         prompt = data.pop("prompt", "")
 
         negative_prompt = data.pop("negative_prompt", "")
+
+        method = data.pop("method", "slow")
+        strength = data.pop("strength", 0.2)
+        guidance_scale = data.pop("guidance_scale", 8.0)
+        num_inference_steps = data.pop("num_inference_steps", 20)
 
         # process image
         if encoded_image is not None and encoded_mask_image is not None:
@@ -58,21 +62,21 @@ class EndpointHandler():
         image = None
         mask_image = None
 
+        if method == "fast":
+            image = self.fast_pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                image=image,
+                mask_image=mask_image,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps, # steps between 15 and 30 work well for us
+                strength=strength, # make sure to use `strength` below 1.0
+                generator=self.generator,
+            ).images[0]
+
+            return image
 
-        image = self.pipe(
-            prompt=prompt,
-            image=image,
-            mask_image=mask_image,
-            guidance_scale=8.0,
-            num_inference_steps=20, # steps between 15 and 30 work well for us
-            strength=0.99, # make sure to use `strength` below 1.0
-            generator=self.generator,
-        ).images[0]
-
-        return image
-
-        """
-        pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
+        # pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
 
         self.pipe.enable_xformers_memory_efficient_attention()
 
@@ -88,9 +92,9 @@ class EndpointHandler():
             negative_prompt=negative_prompt,
             image=image,
             mask_image=mask_image,
-            guidance_scale=8.0,
-            num_inference_steps=100,
-            strength=0.2,
+            guidance_scale=guidance_scale, # was 8.0
+            num_inference_steps=num_inference_steps, # was 100
+            strength=strength, # was 0.2
             output_type="latent", # let's keep in latent to save some VRAM
         ).images[0]
 
@@ -99,24 +103,14 @@ class EndpointHandler():
         image2 = self.pipe3(
             prompt=prompt,
             image=image,
-            guidance_scale=8.0,
-            num_inference_steps=100,
-            strength=0.2,
+            guidance_scale=guidance_scale, # was 8.0
+            num_inference_steps=num_inference_steps, # was 100
+            strength=strength, # was 0.2
         ).images[0]
-
-        print(type(image2))
-        print(type(out.images[0]))
-        print(type(out.images[0].resize((1024, 1024))))
-        print(type(image))
 
-        result = {
-            "final_image": image2,
-            "pipe1_img": out.images[0],
-        }
 
         # return first generated PIL image
-        return result
-        """
+        return image2
 
     # helper to decode input image
     def decode_base64_image(self, image_string):
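The body of decode_base64_image falls outside the hunk context above. For reference, a minimal sketch of such a helper, assuming standard base64 plus Pillow decoding; this is an illustration, not the repository's verified implementation:

import base64
from io import BytesIO

from PIL import Image

# Sketch of an EndpointHandler method: reverse the client-side encoding,
# turning a base64 string back into a PIL image for the pipelines.
def decode_base64_image(self, image_string):
    image_bytes = base64.b64decode(image_string)
    return Image.open(BytesIO(image_bytes)).convert("RGB")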
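And a hypothetical end-to-end call against the updated handler, assuming the usual Inference Endpoints __call__(self, data) contract; the payload keys "image" and "mask_image" are assumptions here, since the hunk that pops encoded_image and encoded_mask_image is not shown:

import base64

from handler import EndpointHandler

def encode_image(path):
    # Base64-encode a local file in the form decode_base64_image reverses.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

handler = EndpointHandler()

payload = {
    "image": encode_image("room.png"),       # assumed key name
    "mask_image": encode_image("mask.png"),  # assumed key name
    "prompt": "a modern stone fireplace",
    "negative_prompt": "blurry, low quality",
    "method": "fast",           # new switch: "fast" takes the single SDXL inpainting pass
    "strength": 0.99,           # the fast path wants strength below 1.0
    "guidance_scale": 8.0,
    "num_inference_steps": 20,  # 15 to 30 steps per the inline comment
}

image = handler(payload)  # PIL image returned by the self.fast_pipe branch
image.save("out.png")

Leaving method at its "slow" default instead runs the newly uncommented two-stage path: an inpainting pass that keeps its output in latent space to save VRAM, followed by self.pipe3 (built via AutoPipelineForImage2Image.from_pipe, which reuses the already loaded weights rather than loading a second copy) producing the final image2.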