hasibzunair committed • Commit cbe6be9 • 1 Parent(s): 00a9f3d • update
README.md
CHANGED
@@ -1,10 +1,9 @@
 ---
 title: Masksup Segmentation Demo
-emoji:
+emoji: ποΈ ποΈ π¦
 colorFrom: green
 colorTo: gray
 sdk: gradio
-sdk_version: 3.14.0
 app_file: app.py
 pinned: false
 license: afl-3.0
app.py
CHANGED
@@ -19,18 +19,22 @@ DEVICE = "cpu"
 print(DEVICE)
 
 # Load color map
-cmap = np.load(
+cmap = np.load("cmap.npy")
 
 # Make directories
 os.system("mkdir ./models")
 
 # Get model weights
 if not os.path.exists("./models/masksupnyu39.31d.pth"):
-    os.system(
+    os.system(
+        "wget -O ./models/masksupnyu39.31d.pth https://github.com/hasibzunair/masksup-segmentation/releases/download/v0.1/masksupnyu39.31iou.pth"
+    )
 
 # Load model
 model = NestedUNet(num_classes=40)
-checkpoint = torch.load(
+checkpoint = torch.load(
+    "./models/masksupnyu39.31d.pth", map_location=torch.device("cpu")
+)
 model.load_state_dict(checkpoint)
 model = model.to(DEVICE)
 model.eval()
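For readers following along, the weight download and checkpoint loading added above can also be done without shelling out to wget. The sketch below is only illustrative: it reuses the release URL and local path from the diff, and stops at producing the state dict that the app then passes to model.load_state_dict.

import os
import urllib.request

import torch

WEIGHTS_URL = (
    "https://github.com/hasibzunair/masksup-segmentation/releases/download/"
    "v0.1/masksupnyu39.31iou.pth"
)
WEIGHTS_PATH = "./models/masksupnyu39.31d.pth"

os.makedirs("./models", exist_ok=True)  # replaces the `mkdir ./models` shell call
if not os.path.exists(WEIGHTS_PATH):
    urllib.request.urlretrieve(WEIGHTS_URL, WEIGHTS_PATH)  # same file wget fetches

# Load on CPU; the resulting dict is what the app feeds to model.load_state_dict(checkpoint)
checkpoint = torch.load(WEIGHTS_PATH, map_location=torch.device("cpu"))
print(type(checkpoint))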
@@ -49,14 +53,23 @@ def inference(img_path):
     )
 
     image = transforms_image(image)
-    image = image[None, :]
+    image = image[None, :]  # batch dimension
+
     # Predict
     with torch.no_grad():
         output = torch.sigmoid(model(image.to(DEVICE).float()))
-        output =
+        output = (
+            torch.softmax(output, dim=1)
+            .argmax(dim=1)[0]
+            .float()
+            .cpu()
+            .numpy()
+            .astype(np.uint8)
+        )
         pred = cmap[output]
         return pred
 
+
 # App
 title = "Masked Supervised Learning for Semantic Segmentation"
 description = codecs.open("description.html", "r", "utf-8").read()
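The reshaped post-processing above maps network outputs to a colorized mask: per-pixel class probabilities are reduced with argmax, and the resulting label map indexes into the palette loaded from cmap.npy. Below is a minimal, self-contained sketch of that lookup, with random stand-ins for the logits and palette and an assumed 40-class setup; note the app additionally applies a sigmoid before the softmax.

import numpy as np
import torch

logits = torch.randn(1, 40, 8, 8)             # stand-in for model(image): (batch, classes, H, W)
probs = torch.softmax(logits, dim=1)           # per-pixel class probabilities
labels = probs.argmax(dim=1)[0]                # (H, W) class indices
labels = labels.cpu().numpy().astype(np.uint8)

cmap = np.random.randint(0, 256, size=(40, 3), dtype=np.uint8)  # stand-in for cmap.npy
pred = cmap[labels]                            # (H, W, 3) RGB segmentation image
print(pred.shape)                              # -> (8, 8, 3)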
@@ -64,13 +77,17 @@ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2210.009
 
 gr.Interface(
     inference,
-    gr.inputs.Image(type=
+    gr.inputs.Image(type="filepath", label="Input Image"),
     gr.outputs.Image(type="filepath", label="Predicted Output"),
-    examples=[
-
+    examples=[
+        "./sample_images/a.png",
+        "./sample_images/b.png",
+        "./sample_images/c.png",
+        "./sample_images/d.png",
+    ],
     title=title,
     description=description,
     article=article,
     allow_flagging=False,
     analytics_enabled=False,
-
+).launch(debug=True, enable_queue=True)
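As a side note, gr.inputs.Image, gr.outputs.Image, and the enable_queue launch flag belong to the Gradio 3.x API that this Space targets. If the app were later moved to a newer Gradio release, the same interface would presumably use the unified gr.Image component and an explicit queue() call; the sketch below is an assumption about that migration, not part of this commit, and reuses inference, title, description, and article from the app.

import gradio as gr

demo = gr.Interface(
    fn=inference,                                         # the function defined above
    inputs=gr.Image(type="filepath", label="Input Image"),
    outputs=gr.Image(type="filepath", label="Predicted Output"),
    examples=[
        "./sample_images/a.png",
        "./sample_images/b.png",
        "./sample_images/c.png",
        "./sample_images/d.png",
    ],
    title=title,
    description=description,
    article=article,
    allow_flagging="never",
    analytics_enabled=False,
)
demo.queue().launch(debug=True)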