Update app.py
app.py CHANGED
@@ -8,10 +8,10 @@ import tensorflow as tf
 from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
 
 feature_extractor = SegformerFeatureExtractor.from_pretrained(
-    "
+    "mattmdjaga/segformer_b2_clothes"
 )
 model = TFSegformerForSemanticSegmentation.from_pretrained(
-    "
+    "mattmdjaga/segformer_b2_clothes"
 )
 
 def ade_palette():
@@ -34,139 +34,7 @@ def ade_palette():
         [200, 56, 123],
         [87, 92, 204],
         [120, 56, 123],
-        [45, 78, 123],
-        [156, 200, 56],
-        [32, 90, 210],
-        [56, 123, 67],
-        [180, 56, 123],
-        [123, 67, 45],
-        [45, 134, 200],
-        [67, 56, 123],
-        [78, 123, 67],
-        [32, 210, 90],
-        [45, 56, 189],
-        [123, 56, 123],
-        [56, 156, 200],
-        [189, 56, 45],
-        [112, 200, 56],
-        [56, 123, 45],
-        [200, 32, 90],
-        [123, 45, 78],
-        [200, 156, 56],
-        [45, 67, 123],
-        [56, 45, 78],
-        [45, 56, 123],
-        [123, 67, 56],
-        [56, 78, 123],
-        [210, 90, 32],
-        [123, 56, 189],
-        [45, 200, 134],
-        [67, 123, 56],
-        [123, 45, 67],
-        [90, 32, 210],
-        [200, 45, 78],
-        [32, 210, 90],
-        [45, 123, 67],
-        [165, 42, 87],
-        [72, 145, 167],
-        [15, 158, 75],
-        [209, 89, 40],
-        [32, 21, 121],
-        [184, 20, 100],
-        [56, 135, 15],
-        [128, 92, 176],
-        [1, 119, 140],
-        [220, 151, 43],
-        [41, 97, 72],
-        [148, 38, 27],
-        [107, 86, 176],
-        [21, 26, 136],
-        [174, 27, 90],
-        [91, 96, 204],
-        [108, 50, 107],
-        [27, 45, 136],
-        [168, 200, 52],
-        [7, 102, 27],
-        [42, 93, 56],
-        [140, 52, 112],
-        [92, 107, 168],
-        [17, 118, 176],
-        [59, 50, 174],
-        [206, 40, 143],
-        [44, 19, 142],
-        [23, 168, 75],
-        [54, 57, 189],
-        [144, 21, 15],
-        [15, 176, 35],
-        [107, 19, 79],
-        [204, 52, 114],
-        [48, 173, 83],
-        [11, 120, 53],
-        [206, 104, 28],
-        [20, 31, 153],
-        [27, 21, 93],
-        [11, 206, 138],
-        [112, 30, 83],
-        [68, 91, 152],
-        [153, 13, 43],
-        [25, 114, 54],
-        [92, 27, 150],
-        [108, 42, 59],
-        [194, 77, 5],
-        [145, 48, 83],
-        [7, 113, 19],
-        [25, 92, 113],
-        [60, 168, 79],
-        [78, 33, 120],
-        [89, 176, 205],
-        [27, 200, 94],
-        [210, 67, 23],
-        [123, 89, 189],
-        [225, 56, 112],
-        [75, 156, 45],
-        [172, 104, 200],
-        [15, 170, 197],
-        [240, 133, 65],
-        [89, 156, 112],
-        [214, 88, 57],
-        [156, 134, 200],
-        [78, 57, 189],
-        [200, 78, 123],
-        [106, 120, 210],
-        [145, 56, 112],
-        [89, 120, 189],
-        [185, 206, 56],
-        [47, 99, 28],
-        [112, 189, 78],
-        [200, 112, 89],
-        [89, 145, 112],
-        [78, 106, 189],
-        [112, 78, 189],
-        [156, 112, 78],
-        [28, 210, 99],
-        [78, 89, 189],
-        [189, 78, 57],
-        [112, 200, 78],
-        [189, 47, 78],
-        [205, 112, 57],
-        [78, 145, 57],
-        [200, 78, 112],
-        [99, 89, 145],
-        [200, 156, 78],
-        [57, 78, 145],
-        [78, 57, 99],
-        [57, 78, 145],
-        [145, 112, 78],
-        [78, 89, 145],
-        [210, 99, 28],
-        [145, 78, 189],
-        [57, 200, 136],
-        [89, 156, 78],
-        [145, 78, 99],
-        [99, 28, 210],
-        [189, 78, 47],
-        [28, 210, 99],
-        [78, 145, 57],
+        [45, 78, 123]
     ]
 
 labels_list = []
@@ -235,7 +103,7 @@ def sepia(input_img):
 demo = gr.Interface(fn=sepia,
                     inputs=gr.Image(shape=(400, 600)),
                     outputs=['plot'],
-                    examples=["
+                    examples=["person-1.jpg", "person-2.jpg", "person-3.jpg", "person-4.jpg", "person-5.jpg"],
                     allow_flagging='never')
 
 
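For reference, a minimal sketch of how the updated checkpoint can be exercised outside the Gradio interface. The two from_pretrained calls mirror the diff; the post-processing (transposing, upsampling, and arg-maxing the logits) is assumed from standard SegFormer usage rather than taken from this commit, and "person-1.jpg" is only a placeholder test image.

# Sketch (assumed usage, not part of the diff): load the
# mattmdjaga/segformer_b2_clothes checkpoint and segment one image.
import tensorflow as tf
from PIL import Image
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

feature_extractor = SegformerFeatureExtractor.from_pretrained("mattmdjaga/segformer_b2_clothes")
model = TFSegformerForSemanticSegmentation.from_pretrained("mattmdjaga/segformer_b2_clothes")

image = Image.open("person-1.jpg")  # placeholder test image
inputs = feature_extractor(images=image, return_tensors="tf")
outputs = model(**inputs, training=False)

# Logits come out channels-first at 1/4 resolution: (batch, num_labels, height/4, width/4).
logits = tf.transpose(outputs.logits, [0, 2, 3, 1])
logits = tf.image.resize(logits, image.size[::-1])    # upsample back to (height, width)
seg_map = tf.math.argmax(logits, axis=-1)[0].numpy()  # per-pixel class ids

Note that the new examples= list assumes person-1.jpg through person-5.jpg are committed alongside app.py in the Space repository; Gradio resolves example paths relative to the app, so those images need to be present for the examples row to load.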