Spaces:
Runtime error
Update app.py
app.py CHANGED
The removed side of the hunk below (old lines 204-248) consisted of 44 blank lines plus one already commented-out visualize_results call; the added side fills that region with a commented-out object-shift-and-inpaint implementation.
@@ -201,51 +201,51 @@ def main_fun(image_pil, x_units, y_units, text_prompt):
     output = draw_image(image_pil, masks, boxes, alpha=0.4)


-    # visualize_results(image_pil, output, 'shifted')
+    # '''Get masked object and background as two separate images'''
+    # mask = np.expand_dims(masks[0], axis=-1)
+    # masked_object = image_pil * mask
+    # background = image_pil * ~mask
+
+
+    # '''Shifts image by x_units and y_units'''
+    # M = np.float32([[1, 0, x_units], [0, 1, y_units]])
+    # shifted_image = cv2.warpAffine(masked_object, M, (masked_object.shape[1], masked_object.shape[0]), borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0))
+    # masked_shifted_image = np.where(shifted_image[:, :, 0] != 0, True, False)
+
+    # '''Load stable diffuser model at checkpoint finetuned for inpainting task'''
+    # pipe = StableDiffusionInpaintPipeline.from_pretrained(
+    #     # "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
+    #     "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16)
+    # pipe.to(CFG.device)
+
+
+    # # With Dilation
+    # structuring_element = np.ones((15, 15, 1), dtype=bool)
+    # extrapolated_mask = binary_dilation(mask, structure=structuring_element)
+    # mask_as_uint8 = extrapolated_mask.astype(np.uint8) * 255
+    # pil_mask = Image.fromarray(mask_as_uint8.squeeze(), mode='L').resize((1024, 1024))
+
+    # # # Without Dilation
+    # # pil_background = Image.fromarray(background)
+    # # mask_as_uint8 = mask.astype(np.uint8) * 255
+    # # pil_mask = Image.fromarray(mask_as_uint8.squeeze(), mode='L')
+
+    # '''Do inpainting on masked locations of original image'''
+    # prompt = 'fill as per background and neighborhood'
+    # inpainted_image = pipe(prompt=prompt, image=image_pil, mask_image=pil_mask).images[0]
+    # # inpainted_image
+
+    # '''Get composite of shifted object and background inpainted image'''
+    # pil_shifted_image = Image.fromarray(shifted_image).resize(inpainted_image.size)
+    # np_shifted_image = np.array(pil_shifted_image)
+    # masked_shifted_image = np.where(np_shifted_image[:, :, 0] != 0, True, False)
+    # masked_shifted_image = np.expand_dims(masked_shifted_image, axis=-1)
+    # inpainted_shifted = np.array(inpainted_image) * ~masked_shifted_image
+
+    # shifted_image = cv2.resize(shifted_image, inpainted_image.size)
+    # output = inpainted_shifted + shifted_image
+    # output = Image.fromarray(output)
+    # # visualize_results(image_pil, output, 'shifted')

     return output
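The first commented-out step splits the image into the masked object and its background by broadcasting the segmentation mask over the RGB channels. A minimal runnable sketch, assuming image_pil is a PIL RGB image and masks[0] is a boolean (H, W) array produced earlier in main_fun (the helper name split_object_and_background is illustrative, not from app.py):

import numpy as np
from PIL import Image

def split_object_and_background(image_pil: Image.Image, mask_hw: np.ndarray):
    """Return (object_only, background_only) as uint8 arrays of shape (H, W, 3)."""
    image = np.array(image_pil)                      # (H, W, 3) uint8
    mask = np.expand_dims(mask_hw.astype(bool), -1)  # (H, W, 1), broadcasts over RGB
    masked_object = image * mask                     # object pixels kept, rest black
    background = image * ~mask                       # background kept, object blacked out
    return masked_object, background

Working on a NumPy copy keeps the dtypes explicit; the single-channel boolean mask broadcasts over the three color channels in both products.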
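The shift step is a plain affine translation: M = [[1, 0, x_units], [0, 1, y_units]] maps (x, y) to (x + x_units, y + y_units), and cv2.warpAffine fills the vacated pixels with black. A sketch assuming masked_object is an (H, W, 3) uint8 array and the offsets are in pixels (shift_object is an illustrative name):

import cv2
import numpy as np

def shift_object(masked_object: np.ndarray, x_units: float, y_units: float) -> np.ndarray:
    """Translate the object image by (x_units, y_units) pixels, padding with black."""
    h, w = masked_object.shape[:2]
    M = np.float32([[1, 0, x_units],   # x' = x + x_units
                    [0, 1, y_units]])  # y' = y + y_units
    return cv2.warpAffine(
        masked_object, M, (w, h),      # dsize is (width, height)
        borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0),
    )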
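The "# With Dilation" branch grows the mask before inpainting so the model also repaints a thin margin around the removed object, which helps hide edge halos. A sketch of that mask preparation with scipy.ndimage.binary_dilation, assuming mask is the boolean (H, W, 1) array from the first step; the 15x15 structuring element and the 1024x1024 resize come from the commented-out code, while the helper name mask_to_pil is illustrative:

import numpy as np
from PIL import Image
from scipy.ndimage import binary_dilation

def mask_to_pil(mask: np.ndarray, dilate: bool = True, target_size=(1024, 1024)) -> Image.Image:
    """Convert a boolean (H, W, 1) mask to the 8-bit 'L' image the inpainting pipeline expects."""
    if dilate:
        # Grow the mask by roughly 7 px in every direction (15x15 structuring element).
        structuring_element = np.ones((15, 15, 1), dtype=bool)
        mask = binary_dilation(mask, structure=structuring_element)
    mask_as_uint8 = mask.astype(np.uint8) * 255   # True -> 255 (repaint), False -> 0 (keep)
    return Image.fromarray(mask_as_uint8.squeeze(), mode='L').resize(target_size)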
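Inpainting itself uses diffusers' StableDiffusionInpaintPipeline: white pixels in the mask image are regenerated, black pixels are kept. A sketch assuming a CUDA device with enough memory for float16 weights (app.py's CFG.device is assumed to hold such a device string); the checkpoint and prompt are the ones in the commented-out code:

import torch
from diffusers import StableDiffusionInpaintPipeline

def inpaint_masked_region(image_pil, pil_mask, device: str = "cuda"):
    """Repaint the white region of pil_mask in image_pil with Stable Diffusion inpainting."""
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting",
        torch_dtype=torch.float16,
    )
    pipe.to(device)
    prompt = "fill as per background and neighborhood"
    # .images[0] is a PIL image; its size may differ from image_pil's,
    # which is why the composite step resizes before blending.
    return pipe(prompt=prompt, image=image_pil, mask_image=pil_mask).images[0]

Loading the pipeline once at module import, rather than inside main_fun as the commented-out code does, would avoid re-initializing the weights on every request.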
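The last commented-out block composites the shifted object over the inpainted background. A sketch that does the same blend with np.where on a mask taken over all three channels (the diff keys the mask off channel 0 only); composite_shifted_object is an illustrative name, and shifted_object is the array returned by the shift step above:

import numpy as np
from PIL import Image

def composite_shifted_object(shifted_object: np.ndarray, inpainted_image: Image.Image) -> Image.Image:
    """Paste the shifted object on top of the inpainted background."""
    w, h = inpainted_image.size
    inpainted = np.array(inpainted_image)                        # (h, w, 3) uint8
    shifted = np.array(Image.fromarray(shifted_object).resize((w, h)))
    object_mask = np.any(shifted != 0, axis=-1, keepdims=True)   # True wherever the object landed
    output = np.where(object_mask, shifted, inpainted)           # object pixels win, inpainted background elsewhere
    return Image.fromarray(output.astype(np.uint8))

With these pieces, the commented-out section of main_fun reduces to: split, shift, build the mask image, inpaint, composite, and return the composite instead of the draw_image overlay.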