Commit bd7f9be · hasibzunair committed · update app.py
Parent: aa6faa6
app.py CHANGED
@@ -5,7 +5,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 import gradio as gr
 import codecs
-import numpy as np
+import numpy as np
 import cv2
 
 from PIL import Image
@@ -15,41 +15,45 @@ from torchvision import transforms as T
 
 NORMALIZE = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
 
-if __name__ == "__main__":
-
-    def inference(img_path):
-        # Load the image
-        with open(img_path, "rb") as f:
-            img = Image.open(f)
-            img = img.convert("RGB")
-            img_np = np.array(img)
 
-        # Preprocess
-        t = T.Compose([T.ToTensor(), NORMALIZE])
-        img_t = t(img)[None, :, :, :]
-        inputs = img_t.to(device)
+def inference(img_path):
+    # Load the image
+    with open(img_path, "rb") as f:
+        img = Image.open(f)
+        img = img.convert("RGB")
+        img_np = np.array(img)
+
+    # Preprocess
+    t = T.Compose([T.ToTensor(), NORMALIZE])
+    img_t = t(img)[None, :, :, :]
+    inputs = img_t.to(device)
+
+    # Forward step
+    print(f"Start Peekaboo prediction.")
+    with torch.no_grad():
+        preds = model(inputs, for_eval=True)
+    print(f"Done Peekaboo prediction.")
 
-        # Forward step
-        print(f"Start Peekaboo prediction.")
-        with torch.no_grad():
-            preds = model(inputs, for_eval=True)
-        print(f"Done Peekaboo prediction.")
+    sigmoid = nn.Sigmoid()
+    h, w = img_t.shape[-2:]
+    preds_up = F.interpolate(
+        preds,
+        scale_factor=model.vit_patch_size,
+        mode="bilinear",
+        align_corners=False,
+    )[..., :h, :w]
+    preds_up = (sigmoid(preds_up.detach()) > 0.5).squeeze(0).float()
+    preds_up = preds_up.cpu().squeeze().numpy()
 
-        sigmoid = nn.Sigmoid()
-        h, w = img_t.shape[-2:]
-        preds_up = F.interpolate(
-            preds, scale_factor=model.vit_patch_size, mode="bilinear", align_corners=False
-        )[..., :h, :w]
-        preds_up = (sigmoid(preds_up.detach()) > 0.5).squeeze(0).float()
-        preds_up = preds_up.cpu().squeeze().numpy()
+    # Overlay predicted mask with input image
+    preds_up_np = (preds_up / np.max(preds_up) * 255).astype(np.uint8)
+    preds_up_np_3d = np.stack([preds_up_np, preds_up_np, preds_up_np], axis=-1)
+    combined_image = cv2.addWeighted(img_np, 0.5, preds_up_np_3d, 0.5, 0)
+    print(f"Output shape is {combined_image.shape}")
+    return combined_image
 
-        # Overlay predicted mask with input image
-        preds_up_np = (preds_up / np.max(preds_up) * 255).astype(np.uint8)
-        preds_up_np_3d = np.stack([preds_up_np, preds_up_np, preds_up_np], axis=-1)
-        combined_image = cv2.addWeighted(img_np, 0.5, preds_up_np_3d, 0.5, 0)
-        print(f"Output shape is {combined_image.shape}")
-        return combined_image
-
+
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Evaluation of Peekaboo",
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
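The preprocessing in the relocated inference() follows the standard torchvision pattern: the PIL image goes through ToTensor (HWC uint8 to CHW float in [0, 1]), then ImageNet-statistics normalization, then gains a batch dimension via [None]. A minimal sketch of that shape flow; the device line is an assumption, since app.py defines device and model elsewhere, outside this diff:

# Illustrative sketch of the preprocessing used in inference().
import torch
from PIL import Image
from torchvision import transforms as T

NORMALIZE = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # assumed
img = Image.new("RGB", (224, 224))  # stand-in for Image.open(img_path)
t = T.Compose([T.ToTensor(), NORMALIZE])
img_t = t(img)[None, :, :, :]  # (1, 3, 224, 224): batch dim added
inputs = img_t.to(device)
print(inputs.shape)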
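The rest of the body upsamples the low-resolution prediction by the ViT patch size, binarizes the sigmoid output at 0.5, and blends the mask with the input image via cv2.addWeighted. Below is a self-contained sketch of that upsample-threshold-overlay path on a dummy prediction map. The overlay_mask name, the patch size of 16, and the zero-mask guard are illustrative additions, not part of the commit; note that the committed code divides by np.max(preds_up), which is zero when no pixel is detected:

# Illustrative sketch only -- mirrors the upsample/threshold/overlay
# steps of the committed inference(); names and patch size are assumed.
import cv2
import numpy as np
import torch
import torch.nn.functional as F

def overlay_mask(img_np, preds, patch_size=16):
    h, w = img_np.shape[:2]
    # Upsample patch-level logits back to pixel resolution, crop padding.
    preds_up = F.interpolate(
        preds, scale_factor=patch_size, mode="bilinear", align_corners=False
    )[..., :h, :w]
    # Binarize at 0.5 and drop batch/channel dims.
    mask = (torch.sigmoid(preds_up) > 0.5).float().squeeze().cpu().numpy()
    # Guard the division: np.max(mask) is 0 for an empty mask.
    denom = max(np.max(mask), 1e-8)
    mask_u8 = (mask / denom * 255).astype(np.uint8)
    mask_rgb = np.stack([mask_u8, mask_u8, mask_u8], axis=-1)
    # 50/50 blend of image and mask, as in the commit.
    return cv2.addWeighted(img_np, 0.5, mask_rgb, 0.5, 0)

# Dummy run: a 224x224 RGB image and a 14x14 map of patch logits.
img = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
logits = torch.randn(1, 1, 14, 14)
print(overlay_mask(img, logits).shape)  # (224, 224, 3)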
@@ -108,7 +112,7 @@ if __name__ == "__main__":
         "./data/examples/b.jpeg",
         "./data/examples/c.jpeg",
         "./data/examples/d.jpeg",
-        "./data/examples/e.jpeg"
+        "./data/examples/e.jpeg",
     ],
     title=title,
     description=description,
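The last hunk only appends a trailing comma after the e.jpeg entry. The list feeds the examples argument of a gr.Interface; since the surrounding call sits outside this diff, the wiring below is a hypothetical sketch of how such an examples list is typically attached:

# Hypothetical wiring -- the commit shows only the tail of the examples list.
import gradio as gr

def inference(img_path):
    ...  # the committed function, stubbed here for illustration

demo = gr.Interface(
    fn=inference,
    inputs=gr.Image(type="filepath"),
    outputs="image",
    examples=[
        "./data/examples/b.jpeg",
        "./data/examples/c.jpeg",
        "./data/examples/d.jpeg",
        "./data/examples/e.jpeg",
    ],
    title="Peekaboo",   # placeholder; app.py passes title=title
    description="...",  # placeholder; app.py passes description=description
)

if __name__ == "__main__":
    demo.launch()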