karimbenharrak committed on
Commit
91b2943
1 Parent(s): 01f7239

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +8 -0
handler.py CHANGED
@@ -98,8 +98,12 @@ class EndpointHandler():
98
  # run inference pipeline
99
  out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image)
100
 
 
 
101
  image = out.images[0].resize((1024, 1024))
102
 
 
 
103
  self.pipe2.enable_xformers_memory_efficient_attention()
104
 
105
  image = self.pipe2(
@@ -112,6 +116,8 @@ class EndpointHandler():
112
  strength=strength, #0.2
113
  output_type="latent", # let's keep in latent to save some VRAM
114
  ).images[0]
 
 
115
 
116
  self.pipe3.enable_xformers_memory_efficient_attention()
117
 
@@ -122,6 +128,8 @@ class EndpointHandler():
122
  num_inference_steps=num_inference_steps, #100
123
  strength=strength, #0.2
124
  ).images[0]
 
 
125
 
126
 
127
  # return first generated PIL image
 
98
  # run inference pipeline
99
  out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image)
100
 
101
+ print("1st pipeline part successful!")
102
+
103
  image = out.images[0].resize((1024, 1024))
104
 
105
+ print("image resizing successful!")
106
+
107
  self.pipe2.enable_xformers_memory_efficient_attention()
108
 
109
  image = self.pipe2(
 
116
  strength=strength, #0.2
117
  output_type="latent", # let's keep in latent to save some VRAM
118
  ).images[0]
119
+
120
+ print("2nd pipeline part successful!")
121
 
122
  self.pipe3.enable_xformers_memory_efficient_attention()
123
 
 
128
  num_inference_steps=num_inference_steps, #100
129
  strength=strength, #0.2
130
  ).images[0]
131
+
132
+ print("3rd pipeline part successful!")
133
 
134
 
135
  # return first generated PIL image