Update app.py
app.py CHANGED
@@ -8,33 +8,34 @@ import tensorflow as tf
 from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
 
 feature_extractor = SegformerFeatureExtractor.from_pretrained(
-    "nvidia/segformer-
+    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
 )
 model = TFSegformerForSemanticSegmentation.from_pretrained(
-    "nvidia/segformer-
+    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
 )
 
 def ade_palette():
     """ADE20K palette that maps each class to RGB values."""
     return [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
-        [
+        [204, 87, 92],
+        [112, 185, 212],
+        [45, 189, 106],
+        [234, 123, 67],
+        [78, 56, 123],
+        [210, 32, 89],
+        [90, 180, 56],
+        [155, 102, 200],
+        [33, 147, 176],
+        [255, 183, 76],
+        [67, 123, 89],
+        [190, 60, 45],
+        [134, 112, 200],
+        [56, 45, 189],
+        [200, 56, 123],
+        [87, 92, 204],
+        [120, 56, 123],
+        [45, 78, 123],
+        [156, 200, 56]
     ]
 
 labels_list = []

@@ -103,8 +104,8 @@ def sepia(input_img):
 demo = gr.Interface(fn=sepia,
                     inputs=gr.Image(shape=(400, 600)),
                     outputs=['plot'],
-                    examples=["
+                    examples=["img_1.jpg", "img_2.jpeg", "img_3.jpg", "img_4.jpg", "img_5.png"],
                     allow_flagging='never')
 
 
-demo.launch()
+demo.launch()
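For context, here is a minimal, hypothetical sketch of how the updated checkpoint and palette are typically wired together at inference time. It is not the app's actual prediction code (the `sepia` function referenced in the second hunk is not shown in this diff), and the `segment` helper below plus its input/output conventions are assumptions. It relies only on the standard `transformers` TF SegFormer API, which returns logits at one quarter of the preprocessed resolution, and on the 19-row palette added above, which lines up with the 19 Cityscapes classes that the `segformer-b1-finetuned-cityscapes-1024-1024` checkpoint predicts.

```python
# Hypothetical usage sketch -- not the `sepia` function from app.py, which this diff does not show.
import numpy as np
import tensorflow as tf
from PIL import Image
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

checkpoint = "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
feature_extractor = SegformerFeatureExtractor.from_pretrained(checkpoint)
model = TFSegformerForSemanticSegmentation.from_pretrained(checkpoint)


def segment(image: Image.Image) -> np.ndarray:
    """Return an (H, W, 3) uint8 colour mask for a PIL image."""
    inputs = feature_extractor(images=image, return_tensors="tf")
    logits = model(**inputs).logits                      # (1, num_labels, h/4, w/4) of the preprocessed size
    logits = tf.transpose(logits, [0, 2, 3, 1])          # -> (1, h/4, w/4, num_labels)
    logits = tf.image.resize(logits, image.size[::-1])   # upsample to the original (H, W)
    labels = tf.argmax(logits, axis=-1)[0].numpy()       # per-pixel class ids

    # ade_palette() is the 19-row palette defined in app.py (see the diff above).
    palette = np.array(ade_palette(), dtype=np.uint8)
    return palette[labels]
```

On the Gradio side, the newly added `examples=[...]` entries are resolved as ordinary file paths, so `img_1.jpg` through `img_5.png` are presumably committed alongside app.py in the Space; `gr.Image(shape=(400, 600))` and `allow_flagging='never'` follow the Gradio 3.x `Interface` signature, and the sketch above assumes that same setup.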